// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.
use bitcoin::blockdata::constants::ChainHash;
use bitcoin::blockdata::script::{Script, ScriptBuf, Builder};
use bitcoin::blockdata::transaction::Transaction;
use bitcoin::sighash::EcdsaSighashType;
use bitcoin::consensus::encode;

use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::sha256d::Hash as Sha256d;
use bitcoin::hash_types::{Txid, BlockHash};

use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
use bitcoin::secp256k1::{PublicKey, SecretKey};
use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
use bitcoin::secp256k1;
use crate::ln::{ChannelId, PaymentPreimage, PaymentHash};
use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
use crate::ln::msgs;
use crate::ln::msgs::DecodeError;
use crate::ln::script::{self, ShutdownScript};
use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT, ChannelShutdownState};
use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
use crate::ln::chan_utils;
use crate::ln::onion_utils::HTLCFailReason;
use crate::chain::BestBlock;
use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
use crate::chain::transaction::{OutPoint, TransactionData};
use crate::sign::ecdsa::{EcdsaChannelSigner, WriteableEcdsaChannelSigner};
use crate::sign::{EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
use crate::events::ClosureReason;
use crate::routing::gossip::NodeId;
use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
use crate::util::logger::{Logger, Record, WithContext};
use crate::util::errors::APIError;
use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure};
use crate::util::scid_utils::scid_from_parts;

use crate::prelude::*;
use core::{cmp, mem, fmt};
use core::convert::TryInto;
use core::ops::Deref;
#[cfg(any(test, fuzzing, debug_assertions))]
use crate::sync::Mutex;
use crate::sign::type_resolver::ChannelSignerType;

use super::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint, RevocationBasepoint};
#[cfg(test)]
pub struct ChannelValueStat {
	pub value_to_self_msat: u64,
	pub channel_value_msat: u64,
	pub channel_reserve_msat: u64,
	pub pending_outbound_htlcs_amount_msat: u64,
	pub pending_inbound_htlcs_amount_msat: u64,
	pub holding_cell_outbound_amount_msat: u64,
	pub counterparty_max_htlc_value_in_flight_msat: u64, // outgoing
	pub counterparty_dust_limit_msat: u64,
}
pub struct AvailableBalances {
	/// The amount that would go to us if we close the channel, ignoring any on-chain fees.
	pub balance_msat: u64,
	/// Total amount available for our counterparty to send to us.
	pub inbound_capacity_msat: u64,
	/// Total amount available for us to send to our counterparty.
	pub outbound_capacity_msat: u64,
	/// The maximum value we can assign to the next outbound HTLC.
	pub next_outbound_htlc_limit_msat: u64,
	/// The minimum value we can assign to the next outbound HTLC.
	pub next_outbound_htlc_minimum_msat: u64,
}
#[derive(Debug, Clone, Copy, PartialEq)]
enum FeeUpdateState {
	// Inbound states mirroring InboundHTLCState
	RemoteAnnounced,
	AwaitingRemoteRevokeToAnnounce,
	// Note that we do not have an `AwaitingAnnouncedRemoteRevoke` variant here as it is universally
	// handled the same as `Committed`, with the only exception in `InboundHTLCState` being the
	// distinction of when we allow ourselves to forward the HTLC. Because we aren't "forwarding"
	// the fee update anywhere, we can simply consider the fee update `Committed` immediately
	// instead of setting it to `AwaitingAnnouncedRemoteRevoke`.
	Committed,
	// Outbound state can only be `LocalAnnounced` or `Committed`
	Outbound,
}

enum InboundHTLCRemovalReason {
	FailRelay(msgs::OnionErrorPacket),
	FailMalformed(([u8; 32], u16)),
	Fulfill(PaymentPreimage),
}
enum InboundHTLCState {
	/// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an
	/// update_add_htlc message for this HTLC.
	RemoteAnnounced(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've
	/// revoke_and_ack'd it), but the remote hasn't yet revoked their previous
	/// state (see the example below). We have not yet included this HTLC in a
	/// commitment_signed message because we are waiting on the remote's
	/// aforementioned state revocation. One reason this missing remote RAA
	/// (revoke_and_ack) blocks us from constructing a commitment_signed message
	/// is because every time we create a new "state", i.e. every time we sign a
	/// new commitment tx (see [BOLT #2]), we need a new per_commitment_point,
	/// which are provided one-at-a-time in each RAA. E.g., the last RAA they
	/// sent provided the per_commitment_point for our current commitment tx.
	/// The other reason we should not send a commitment_signed without their RAA
	/// is because their RAA serves to ACK our previous commitment_signed.
	///
	/// Here's an example of how an HTLC could come to be in this state:
	/// remote --> update_add_htlc(prev_htlc) --> local
	/// remote --> commitment_signed(prev_htlc) --> local
	/// remote <-- revoke_and_ack <-- local
	/// remote <-- commitment_signed(prev_htlc) <-- local
	/// [note that here, the remote does not respond with a RAA]
	/// remote --> update_add_htlc(this_htlc) --> local
	/// remote --> commitment_signed(prev_htlc, this_htlc) --> local
	/// Now `this_htlc` will be assigned this state. It's unable to be officially
	/// accepted, i.e. included in a commitment_signed, because we're missing the
	/// RAA that provides our next per_commitment_point. The per_commitment_point
	/// is used to derive commitment keys, which are used to construct the
	/// signatures in a commitment_signed message.
	/// Implies AwaitingRemoteRevoke.
	///
	/// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
	AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've revoke_and_ack'd it).
	/// We have also included this HTLC in our latest commitment_signed and are now just waiting
	/// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the
	/// channel (before it can then get forwarded and/or removed).
	/// Implies AwaitingRemoteRevoke.
	AwaitingAnnouncedRemoteRevoke(PendingHTLCStatus),
	Committed,
	/// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we'll drop it.
	///
	/// Note that we have to keep an eye on the HTLC until we've received a broadcastable
	/// commitment transaction without it as otherwise we'll have to force-close the channel to
	/// claim it before the timeout (obviously doesn't apply to revoked HTLCs that we can't claim
	/// anyway). That said, ChannelMonitor does this for us (see
	/// ChannelMonitor::should_broadcast_holder_commitment_txn) so we actually remove the HTLC from
	/// our own local state before then, once we're sure that the next commitment_signed and
	/// ChannelMonitor::provide_latest_local_commitment_tx will not include this HTLC.
	LocalRemoved(InboundHTLCRemovalReason),
}
struct InboundHTLCOutput {
	htlc_id: u64,
	amount_msat: u64,
	cltv_expiry: u32,
	payment_hash: PaymentHash,
	state: InboundHTLCState,
}
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
enum OutboundHTLCState {
	/// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we will promote to Committed (note that they may not accept it until the next time we
	/// revoke, but we don't really care about that:
	///  * they've revoked, so worst case we can announce an old state and get our (option on)
	///    money back (though we won't), and,
	///  * we'll send them a revoke when they send a commitment_signed, and since only they're
	///    allowed to remove it, the "can only be removed once committed on both sides" requirement
	///    doesn't matter to us and it's up to them to enforce it, worst-case they jump ahead but
	///    we'll never get out of sync).
	/// Note that we Box the OnionPacket as it's rather large and we don't want to blow up
	/// OutboundHTLCOutput's size just for a temporary bit
	LocalAnnounced(Box<msgs::OnionPacket>),
	Committed,
	/// Remote removed this (outbound) HTLC. We're waiting on their commitment_signed to finalize
	/// the change (though they'll need to revoke before we fail the payment).
	RemoteRemoved(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We also have not yet removed this HTLC in a commitment_signed message, and are waiting on a
	/// remote revoke_and_ack on a previous state before we can do so.
	AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We have removed this HTLC in our latest commitment_signed and are now just waiting on a
	/// revoke_and_ack to drop completely.
	AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome),
}
#[derive(Clone)]
#[cfg_attr(test, derive(Debug, PartialEq))]
enum OutboundHTLCOutcome {
	/// LDK version 0.0.105+ will always fill in the preimage here.
	Success(Option<PaymentPreimage>),
	Failure(HTLCFailReason),
}

impl From<Option<HTLCFailReason>> for OutboundHTLCOutcome {
	fn from(o: Option<HTLCFailReason>) -> Self {
		match o {
			None => OutboundHTLCOutcome::Success(None),
			Some(r) => OutboundHTLCOutcome::Failure(r)
		}
	}
}

impl<'a> Into<Option<&'a HTLCFailReason>> for &'a OutboundHTLCOutcome {
	fn into(self) -> Option<&'a HTLCFailReason> {
		match self {
			OutboundHTLCOutcome::Success(_) => None,
			OutboundHTLCOutcome::Failure(ref r) => Some(r)
		}
	}
}
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
struct OutboundHTLCOutput {
	htlc_id: u64,
	amount_msat: u64,
	cltv_expiry: u32,
	payment_hash: PaymentHash,
	state: OutboundHTLCState,
	source: HTLCSource,
	blinding_point: Option<PublicKey>,
	skimmed_fee_msat: Option<u64>,
}
/// See AwaitingRemoteRevoke ChannelState for more info
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
enum HTLCUpdateAwaitingACK {
	AddHTLC { // TODO: Time out if we're getting close to cltv_expiry
		amount_msat: u64,
		cltv_expiry: u32,
		payment_hash: PaymentHash,
		source: HTLCSource,
		onion_routing_packet: msgs::OnionPacket,
		// The extra fee we're skimming off the top of this HTLC.
		skimmed_fee_msat: Option<u64>,
		blinding_point: Option<PublicKey>,
	},
	ClaimHTLC {
		payment_preimage: PaymentPreimage,
		htlc_id: u64,
	},
	FailHTLC {
		htlc_id: u64,
		err_packet: msgs::OnionErrorPacket,
	},
	FailMalformedHTLC {
		htlc_id: u64,
		failure_code: u16,
		sha256_of_onion: [u8; 32],
	},
}
macro_rules! define_state_flags {
	($flag_type_doc: expr, $flag_type: ident, [$(($flag_doc: expr, $flag: ident, $value: expr)),+], $extra_flags: expr) => {
		#[doc = $flag_type_doc]
		#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
		struct $flag_type(u32);

		impl $flag_type {
			$(
				#[doc = $flag_doc]
				const $flag: $flag_type = $flag_type($value);
			)*

			/// All flags that apply to the specified [`ChannelState`] variant.
			const ALL: $flag_type = Self($(Self::$flag.0 | )* $extra_flags);

			fn new() -> Self { Self(0) }

			fn from_u32(flags: u32) -> Result<Self, ()> {
				if flags & !Self::ALL.0 != 0 {
					Err(())
				} else {
					Ok($flag_type(flags))
				}
			}

			fn is_empty(&self) -> bool { self.0 == 0 }

			fn is_set(&self, flag: Self) -> bool { *self & flag == flag }
		}

		impl core::ops::Not for $flag_type {
			type Output = Self;
			fn not(self) -> Self::Output { Self(!self.0) }
		}
		impl core::ops::BitOr for $flag_type {
			type Output = Self;
			fn bitor(self, rhs: Self) -> Self::Output { Self(self.0 | rhs.0) }
		}
		impl core::ops::BitOrAssign for $flag_type {
			fn bitor_assign(&mut self, rhs: Self) { self.0 |= rhs.0; }
		}
		impl core::ops::BitAnd for $flag_type {
			type Output = Self;
			fn bitand(self, rhs: Self) -> Self::Output { Self(self.0 & rhs.0) }
		}
		impl core::ops::BitAndAssign for $flag_type {
			fn bitand_assign(&mut self, rhs: Self) { self.0 &= rhs.0; }
		}
	};
	($flag_type_doc: expr, $flag_type: ident, $flags: tt) => {
		define_state_flags!($flag_type_doc, $flag_type, $flags, 0);
	};
	($flag_type_doc: expr, FUNDED_STATE, $flag_type: ident, $flags: tt) => {
		define_state_flags!($flag_type_doc, $flag_type, $flags, FundedStateFlags::ALL.0);

		impl core::ops::BitOr<FundedStateFlags> for $flag_type {
			type Output = Self;
			fn bitor(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 | rhs.0) }
		}
		impl core::ops::BitOrAssign<FundedStateFlags> for $flag_type {
			fn bitor_assign(&mut self, rhs: FundedStateFlags) { self.0 |= rhs.0; }
		}
		impl core::ops::BitAnd<FundedStateFlags> for $flag_type {
			type Output = Self;
			fn bitand(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 & rhs.0) }
		}
		impl core::ops::BitAndAssign<FundedStateFlags> for $flag_type {
			fn bitand_assign(&mut self, rhs: FundedStateFlags) { self.0 &= rhs.0; }
		}
		impl PartialEq<FundedStateFlags> for $flag_type {
			fn eq(&self, other: &FundedStateFlags) -> bool { self.0 == other.0 }
		}
		impl From<FundedStateFlags> for $flag_type {
			fn from(flags: FundedStateFlags) -> Self { Self(flags.0) }
		}
	};
}
/// We declare all the states/flags here together to help determine which bits are still available
/// to choose.
mod state_flags {
	pub const OUR_INIT_SENT: u32 = 1 << 0;
	pub const THEIR_INIT_SENT: u32 = 1 << 1;
	pub const FUNDING_NEGOTIATED: u32 = 1 << 2;
	pub const AWAITING_CHANNEL_READY: u32 = 1 << 3;
	pub const THEIR_CHANNEL_READY: u32 = 1 << 4;
	pub const OUR_CHANNEL_READY: u32 = 1 << 5;
	pub const CHANNEL_READY: u32 = 1 << 6;
	pub const PEER_DISCONNECTED: u32 = 1 << 7;
	pub const MONITOR_UPDATE_IN_PROGRESS: u32 = 1 << 8;
	pub const AWAITING_REMOTE_REVOKE: u32 = 1 << 9;
	pub const REMOTE_SHUTDOWN_SENT: u32 = 1 << 10;
	pub const LOCAL_SHUTDOWN_SENT: u32 = 1 << 11;
	pub const SHUTDOWN_COMPLETE: u32 = 1 << 12;
	pub const WAITING_FOR_BATCH: u32 = 1 << 13;
}
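// As of this writing the highest flag above is WAITING_FOR_BATCH (1 << 13), leaving bits 14 and
// up available for future states/flags.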
371 "Flags that apply to all [`ChannelState`] variants in which the channel is funded.",
373 ("Indicates the remote side is considered \"disconnected\" and no updates are allowed \
374 until after we've done a `channel_reestablish` dance.", PEER_DISCONNECTED, state_flags::PEER_DISCONNECTED),
375 ("Indicates the user has told us a `ChannelMonitor` update is pending async persistence \
376 somewhere and we should pause sending any outbound messages until they've managed to \
377 complete it.", MONITOR_UPDATE_IN_PROGRESS, state_flags::MONITOR_UPDATE_IN_PROGRESS),
378 ("Indicates we received a `shutdown` message from the remote end. If set, they may not add \
379 any new HTLCs to the channel, and we are expected to respond with our own `shutdown` \
380 message when possible.", REMOTE_SHUTDOWN_SENT, state_flags::REMOTE_SHUTDOWN_SENT),
381 ("Indicates we sent a `shutdown` message. At this point, we may not add any new HTLCs to \
382 the channel.", LOCAL_SHUTDOWN_SENT, state_flags::LOCAL_SHUTDOWN_SENT)
387 "Flags that only apply to [`ChannelState::NegotiatingFunding`].",
388 NegotiatingFundingFlags, [
389 ("Indicates we have (or are prepared to) send our `open_channel`/`accept_channel` message.",
390 OUR_INIT_SENT, state_flags::OUR_INIT_SENT),
391 ("Indicates we have received their `open_channel`/`accept_channel` message.",
392 THEIR_INIT_SENT, state_flags::THEIR_INIT_SENT)
397 "Flags that only apply to [`ChannelState::AwaitingChannelReady`].",
398 FUNDED_STATE, AwaitingChannelReadyFlags, [
399 ("Indicates they sent us a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
400 `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
401 THEIR_CHANNEL_READY, state_flags::THEIR_CHANNEL_READY),
402 ("Indicates we sent them a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
403 `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
404 OUR_CHANNEL_READY, state_flags::OUR_CHANNEL_READY),
405 ("Indicates the channel was funded in a batch and the broadcast of the funding transaction \
406 is being held until all channels in the batch have received `funding_signed` and have \
407 their monitors persisted.", WAITING_FOR_BATCH, state_flags::WAITING_FOR_BATCH)
412 "Flags that only apply to [`ChannelState::ChannelReady`].",
413 FUNDED_STATE, ChannelReadyFlags, [
414 ("Indicates that we have sent a `commitment_signed` but are awaiting the responding \
415 `revoke_and_ack` message. During this period, we can't generate new `commitment_signed` \
416 messages as we'd be unable to determine which HTLCs they included in their `revoke_and_ack` \
417 implicit ACK, so instead we have to hold them away temporarily to be sent later.",
418 AWAITING_REMOTE_REVOKE, state_flags::AWAITING_REMOTE_REVOKE)
// Note that the order of this enum is implicitly defined by where each variant is placed. Take this
// into account when introducing new states and update `test_channel_state_order` accordingly.
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
enum ChannelState {
	/// We are negotiating the parameters required for the channel prior to funding it.
	NegotiatingFunding(NegotiatingFundingFlags),
	/// We have sent `funding_created` and are awaiting a `funding_signed` to advance to
	/// `AwaitingChannelReady`. Note that this is nonsense for an inbound channel as we immediately generate
	/// `funding_signed` upon receipt of `funding_created`, so simply skip this state.
	FundingNegotiated,
	/// We've received/sent `funding_created` and `funding_signed` and are thus now waiting on the
	/// funding transaction to confirm.
	AwaitingChannelReady(AwaitingChannelReadyFlags),
	/// Both we and our counterparty consider the funding transaction confirmed and the channel is
	/// now operational.
	ChannelReady(ChannelReadyFlags),
	/// We've successfully negotiated a `closing_signed` dance. At this point, the `ChannelManager`
	/// is about to drop us, but we store this anyway.
	ShutdownComplete,
}
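// Illustrative note (not upstream text): a typical channel walks this enum top to bottom, e.g.
// NegotiatingFunding -> FundingNegotiated -> AwaitingChannelReady -> ChannelReady, terminating
// in ShutdownComplete once a cooperative close completes.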
macro_rules! impl_state_flag {
	($get: ident, $set: ident, $clear: ident, $state_flag: expr, [$($state: ident),+]) => {
		#[allow(unused)]
		fn $get(&self) -> bool {
			match self {
				$(
					ChannelState::$state(flags) => flags.is_set($state_flag.into()),
				)*
				_ => false,
			}
		}
		#[allow(unused)]
		fn $set(&mut self) {
			match self {
				$(
					ChannelState::$state(flags) => *flags |= $state_flag,
				)*
				_ => debug_assert!(false, "Attempted to set flag on unexpected ChannelState"),
			}
		}
		#[allow(unused)]
		fn $clear(&mut self) {
			match self {
				$(
					ChannelState::$state(flags) => *flags &= !($state_flag),
				)*
				_ => debug_assert!(false, "Attempted to clear flag on unexpected ChannelState"),
			}
		}
	};
	($get: ident, $set: ident, $clear: ident, $state_flag: expr, FUNDED_STATES) => {
		impl_state_flag!($get, $set, $clear, $state_flag, [AwaitingChannelReady, ChannelReady]);
	};
	($get: ident, $set: ident, $clear: ident, $state_flag: expr, $state: ident) => {
		impl_state_flag!($get, $set, $clear, $state_flag, [$state]);
	};
}
impl ChannelState {
	fn from_u32(state: u32) -> Result<Self, ()> {
		match state {
			state_flags::FUNDING_NEGOTIATED => Ok(ChannelState::FundingNegotiated),
			state_flags::SHUTDOWN_COMPLETE => Ok(ChannelState::ShutdownComplete),
			val => {
				if val & state_flags::AWAITING_CHANNEL_READY == state_flags::AWAITING_CHANNEL_READY {
					AwaitingChannelReadyFlags::from_u32(val & !state_flags::AWAITING_CHANNEL_READY)
						.map(|flags| ChannelState::AwaitingChannelReady(flags))
				} else if val & state_flags::CHANNEL_READY == state_flags::CHANNEL_READY {
					ChannelReadyFlags::from_u32(val & !state_flags::CHANNEL_READY)
						.map(|flags| ChannelState::ChannelReady(flags))
				} else if let Ok(flags) = NegotiatingFundingFlags::from_u32(val) {
					Ok(ChannelState::NegotiatingFunding(flags))
				} else {
					Err(())
				}
			},
		}
	}

	fn to_u32(&self) -> u32 {
		match self {
			ChannelState::NegotiatingFunding(flags) => flags.0,
			ChannelState::FundingNegotiated => state_flags::FUNDING_NEGOTIATED,
			ChannelState::AwaitingChannelReady(flags) => state_flags::AWAITING_CHANNEL_READY | flags.0,
			ChannelState::ChannelReady(flags) => state_flags::CHANNEL_READY | flags.0,
			ChannelState::ShutdownComplete => state_flags::SHUTDOWN_COMPLETE,
		}
	}
	fn is_pre_funded_state(&self) -> bool {
		matches!(self, ChannelState::NegotiatingFunding(_)|ChannelState::FundingNegotiated)
	}

	fn is_both_sides_shutdown(&self) -> bool {
		self.is_local_shutdown_sent() && self.is_remote_shutdown_sent()
	}

	fn with_funded_state_flags_mask(&self) -> FundedStateFlags {
		match self {
			ChannelState::AwaitingChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
			ChannelState::ChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
			_ => FundedStateFlags::new(),
		}
	}

	fn should_force_holding_cell(&self) -> bool {
		match self {
			ChannelState::ChannelReady(flags) =>
				flags.is_set(ChannelReadyFlags::AWAITING_REMOTE_REVOKE) ||
					flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into()) ||
					flags.is_set(FundedStateFlags::PEER_DISCONNECTED.into()),
			_ => {
				debug_assert!(false, "The holding cell is only valid within ChannelReady");
				false
			},
		}
	}
	impl_state_flag!(is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected,
		FundedStateFlags::PEER_DISCONNECTED, FUNDED_STATES);
	impl_state_flag!(is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress,
		FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS, FUNDED_STATES);
	impl_state_flag!(is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent,
		FundedStateFlags::LOCAL_SHUTDOWN_SENT, FUNDED_STATES);
	impl_state_flag!(is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent,
		FundedStateFlags::REMOTE_SHUTDOWN_SENT, FUNDED_STATES);
	impl_state_flag!(is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready,
		AwaitingChannelReadyFlags::OUR_CHANNEL_READY, AwaitingChannelReady);
	impl_state_flag!(is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready,
		AwaitingChannelReadyFlags::THEIR_CHANNEL_READY, AwaitingChannelReady);
	impl_state_flag!(is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch,
		AwaitingChannelReadyFlags::WAITING_FOR_BATCH, AwaitingChannelReady);
	impl_state_flag!(is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke,
		ChannelReadyFlags::AWAITING_REMOTE_REVOKE, ChannelReady);
}
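// Illustrative only (hypothetical test, not upstream code): the generated accessors compose with
// the `from_u32`/`to_u32` round-trip above.
#[cfg(test)]
#[test]
fn example_channel_state_flags_round_trip() {
	let mut state = ChannelState::ChannelReady(ChannelReadyFlags::new());
	state.set_awaiting_remote_revoke();
	assert!(state.is_awaiting_remote_revoke());
	// Serializing the state to its u32 form and back yields the same state.
	assert_eq!(ChannelState::from_u32(state.to_u32()), Ok(state));
}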
pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;

pub const DEFAULT_MAX_HTLCS: u16 = 50;
pub(crate) fn commitment_tx_base_weight(channel_type_features: &ChannelTypeFeatures) -> u64 {
	const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
	const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124;
	if channel_type_features.supports_anchors_zero_fee_htlc_tx() { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT }
}
#[cfg(not(test))]
const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
#[cfg(test)]
pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
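// Illustrative sketch (hypothetical helper, not upstream code): the expected commitment
// transaction fee follows from the two constants above as
//   fee_sat = feerate_per_kw * (base_weight + num_nondust_htlcs * 172) / 1000
#[cfg(test)]
#[allow(unused)]
fn example_commit_tx_fee_sat(
	feerate_per_kw: u32, num_nondust_htlcs: u64, channel_type_features: &ChannelTypeFeatures,
) -> u64 {
	feerate_per_kw as u64
		* (commitment_tx_base_weight(channel_type_features)
			+ num_nondust_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC)
		/ 1000
}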
pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;
/// The percentage of the channel value `holder_max_htlc_value_in_flight_msat` used to be set to,
/// before this was made configurable. The percentage was made configurable in LDK 0.0.107,
/// although LDK 0.0.104+ enabled serialization of channels with a different value set for
/// `holder_max_htlc_value_in_flight_msat`.
pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10;

/// Maximum `funding_satoshis` value according to the BOLT #2 specification, if
/// `option_support_large_channel` (aka wumbo channels) is not supported.
pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;
/// Total bitcoin supply in satoshis.
pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 1_0000_0000;
/// The maximum network dust limit for standard script formats. This currently represents the
/// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire
/// transaction non-standard and thus refuses to relay it.
/// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many
/// implementations use this value for their dust limit today.
pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546;

/// The maximum channel dust limit we will accept from our counterparty.
pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS;
/// The dust limit is used for both the commitment transaction outputs as well as the closing
/// transactions. For cooperative closing transactions, we require segwit outputs, though accept
/// *any* segwit scripts, which are allowed to be up to 42 bytes in length.
/// In order to avoid having to concern ourselves with standardness during the closing process, we
/// simply require our counterparty to use a dust limit which will leave any segwit output
/// standard.
/// See <https://github.com/lightning/bolts/issues/905> for more details.
pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;
// Just a reasonable implementation-specific safe lower bound, higher than the dust limit.
pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;
/// Used to return a simple Error back to ChannelManager. Will get converted to a
/// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
/// channel_id in ChannelManager.
pub(super) enum ChannelError {
	Ignore(String),
	Warn(String),
	Close(String),
}

impl fmt::Debug for ChannelError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match self {
			&ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
			&ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
			&ChannelError::Close(ref e) => write!(f, "Close : {}", e),
		}
	}
}
impl fmt::Display for ChannelError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match self {
			&ChannelError::Ignore(ref e) => write!(f, "{}", e),
			&ChannelError::Warn(ref e) => write!(f, "{}", e),
			&ChannelError::Close(ref e) => write!(f, "{}", e),
		}
	}
}
pub(super) struct WithChannelContext<'a, L: Deref> where L::Target: Logger {
	pub logger: &'a L,
	pub peer_id: Option<PublicKey>,
	pub channel_id: Option<ChannelId>,
}

impl<'a, L: Deref> Logger for WithChannelContext<'a, L> where L::Target: Logger {
	fn log(&self, mut record: Record) {
		record.peer_id = self.peer_id;
		record.channel_id = self.channel_id;
		self.logger.log(record)
	}
}

impl<'a, 'b, L: Deref> WithChannelContext<'a, L>
where L::Target: Logger {
	pub(super) fn from<S: Deref>(logger: &'a L, context: &'b ChannelContext<S>) -> Self
	where S::Target: SignerProvider
	{
		WithChannelContext {
			logger,
			peer_id: Some(context.counterparty_node_id),
			channel_id: Some(context.channel_id),
		}
	}
}
macro_rules! secp_check {
	($res: expr, $err: expr) => {
		match $res {
			Ok(thing) => thing,
			Err(_) => return Err(ChannelError::Close($err)),
		}
	};
}
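// Hypothetical usage sketch (not upstream code): unwrap a secp256k1 `Result` or convert the
// failure into a channel-closing error, e.g.
//   let _ = secp_check!(self.secp_ctx.verify_ecdsa(&msg, &sig, &pk), "Invalid signature".to_owned());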
677 /// The "channel disabled" bit in channel_update must be set based on whether we are connected to
678 /// our counterparty or not. However, we don't want to announce updates right away to avoid
679 /// spamming the network with updates if the connection is flapping. Instead, we "stage" updates to
680 /// our channel_update message and track the current state here.
681 /// See implementation at [`super::channelmanager::ChannelManager::timer_tick_occurred`].
682 #[derive(Clone, Copy, PartialEq)]
683 pub(super) enum ChannelUpdateStatus {
684 /// We've announced the channel as enabled and are connected to our peer.
686 /// Our channel is no longer live, but we haven't announced the channel as disabled yet.
688 /// Our channel is live again, but we haven't announced the channel as enabled yet.
690 /// We've announced the channel as disabled.
/// We track when we sent an `AnnouncementSignatures` to our peer in a few states, described here.
pub enum AnnouncementSigsState {
	/// We have not sent our peer an `AnnouncementSignatures` yet, or our peer disconnected since
	/// we sent the last `AnnouncementSignatures`.
	NotSent,
	/// We sent an `AnnouncementSignatures` to our peer since the last time our peer disconnected.
	/// This state never appears on disk - instead we write `NotSent`.
	MessageSent,
	/// We sent a `CommitmentSigned` after the last `AnnouncementSignatures` we sent. Because we
	/// only ever have a single `CommitmentSigned` pending at once, if we sent one after sending
	/// `AnnouncementSignatures` then we know the peer received our `AnnouncementSignatures` if
	/// they send back a `RevokeAndACK`.
	/// This state never appears on disk - instead we write `NotSent`.
	Committed,
	/// We received a `RevokeAndACK`, effectively ack-ing our `AnnouncementSignatures`, at this
	/// point we no longer need to re-send our `AnnouncementSignatures` again on reconnect.
	PeerReceived,
}
/// An enum indicating whether the local or remote side offered a given HTLC.
enum HTLCInitiator {
	LocalOffered,
	RemoteOffered,
}
/// A struct gathering stats on pending HTLCs, either inbound or outbound side.
struct HTLCStats {
	pending_htlcs: u32,
	pending_htlcs_value_msat: u64,
	on_counterparty_tx_dust_exposure_msat: u64,
	on_holder_tx_dust_exposure_msat: u64,
	holding_cell_msat: u64,
	on_holder_tx_holding_cell_htlcs_count: u32, // dust HTLCs *non*-included
}
/// A struct gathering stats on a commitment transaction, either local or remote.
struct CommitmentStats<'a> {
	tx: CommitmentTransaction, // the transaction info
	feerate_per_kw: u32, // the feerate included to build the transaction
	total_fee_sat: u64, // the total fee included in the transaction
	num_nondust_htlcs: usize, // the number of HTLC outputs (dust HTLCs *non*-included)
	htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction
	local_balance_msat: u64, // local balance before fees *not* considering dust limits
	remote_balance_msat: u64, // remote balance before fees *not* considering dust limits
	outbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
	inbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful received HTLCs since last commitment
}
/// Used when calculating whether we or the remote can afford an additional HTLC.
struct HTLCCandidate {
	amount_msat: u64,
	origin: HTLCInitiator,
}

impl HTLCCandidate {
	fn new(amount_msat: u64, origin: HTLCInitiator) -> Self {
		Self { amount_msat, origin }
	}
}
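// e.g. (illustrative): `HTLCCandidate::new(10_000, HTLCInitiator::LocalOffered)` models adding a
// 10k-msat HTLC of our own when predicting the next commitment transaction's fee.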
/// A return value enum for get_update_fulfill_htlc. See UpdateFulfillCommitFetch variants for
/// description.
enum UpdateFulfillFetch {
	NewClaim {
		monitor_update: ChannelMonitorUpdate,
		htlc_value_msat: u64,
		msg: Option<msgs::UpdateFulfillHTLC>,
	},
	DuplicateClaim {},
}
/// The return type of get_update_fulfill_htlc_and_commit.
pub enum UpdateFulfillCommitFetch {
	/// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
	/// it in the holding cell, or re-generated the update_fulfill message after the same claim was
	/// previously placed in the holding cell (and has since been removed).
	NewClaim {
		/// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
		monitor_update: ChannelMonitorUpdate,
		/// The value of the HTLC which was claimed, in msat.
		htlc_value_msat: u64,
	},
	/// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
	/// or has been forgotten (presumably previously claimed).
	DuplicateClaim {},
}
/// The return value of `monitor_updating_restored`
pub(super) struct MonitorRestoreUpdates {
	pub raa: Option<msgs::RevokeAndACK>,
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub order: RAACommitmentOrder,
	pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
	pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
	pub finalized_claimed_htlcs: Vec<HTLCSource>,
	pub funding_broadcastable: Option<Transaction>,
	pub channel_ready: Option<msgs::ChannelReady>,
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
}
/// The return value of `signer_maybe_unblocked`
pub(super) struct SignerResumeUpdates {
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub funding_signed: Option<msgs::FundingSigned>,
	pub channel_ready: Option<msgs::ChannelReady>,
}
/// The return value of `channel_reestablish`
pub(super) struct ReestablishResponses {
	pub channel_ready: Option<msgs::ChannelReady>,
	pub raa: Option<msgs::RevokeAndACK>,
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub order: RAACommitmentOrder,
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
	pub shutdown_msg: Option<msgs::Shutdown>,
}
/// The result of a shutdown that should be handled.
pub(crate) struct ShutdownResult {
	pub(crate) closure_reason: ClosureReason,
	/// A channel monitor update to apply.
	pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
	/// A list of dropped outbound HTLCs that can safely be failed backwards immediately.
	pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
	/// An unbroadcasted batch funding transaction id. The closure of this channel should be
	/// propagated to the remainder of the batch.
	pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
	pub(crate) channel_id: ChannelId,
	pub(crate) user_channel_id: u128,
	pub(crate) channel_capacity_satoshis: u64,
	pub(crate) counterparty_node_id: PublicKey,
	pub(crate) unbroadcasted_funding_tx: Option<Transaction>,
}
/// If the majority of the channel's funds are to the fundee and the initiator holds only just
/// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
/// initiator controls the feerate, if they then go to increase the channel fee, they may have no
/// balance but the fundee is unable to send a payment as the increase in fee more than drains
/// their reserve value. Thus, neither side can send a new HTLC and the channel becomes useless.
/// Thus, before sending an HTLC when we are the initiator, we check that the feerate can increase
/// by this multiple without hitting this case, before sending.
/// This multiple is effectively the maximum feerate "jump" we expect until more HTLCs flow over
/// the channel. Sadly, there isn't really a good number for this - if we expect to have no new
/// HTLCs for days we may need this to suffice for feerate increases across days, but that may
/// leave the channel less usable as we hold a bigger reserve.
#[cfg(any(fuzzing, test))]
pub const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
#[cfg(not(any(fuzzing, test)))]
const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
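// Illustrative sketch (hypothetical helper, not upstream code): when we are the initiator, we
// want any new HTLC to remain affordable even after the feerate jumps by the multiple above,
// i.e. we check against the buffered feerate rather than the current one.
#[cfg(test)]
#[allow(unused)]
fn example_fee_spike_buffer_feerate(feerate_per_kw: u32) -> u32 {
	feerate_per_kw.saturating_mul(FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32)
}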
/// If we fail to see a funding transaction confirmed on-chain within this many blocks after the
/// channel creation on an inbound channel, we simply force-close and move on.
/// This constant is the one suggested in BOLT 2.
pub(crate) const FUNDING_CONF_DEADLINE_BLOCKS: u32 = 2016;
/// In case of a concurrent update_add_htlc proposed by our counterparty, we might
/// not have enough balance value remaining to cover the onchain cost of this new
/// HTLC weight. If this happens, our counterparty fails the reception of our
/// commitment_signed including this new HTLC due to infringement on the channel
/// reserve.
/// To prevent this case, we compute our outbound update_fee with an HTLC buffer of
/// size 2. However, if the number of concurrent update_add_htlc is higher, this still
/// leads to a channel force-close. Ultimately, this is an issue coming from the
/// design of LN state machines, allowing asynchronous updates.
pub(crate) const CONCURRENT_INBOUND_HTLC_FEE_BUFFER: u32 = 2;
/// When a channel is opened, we check that the funding amount is enough to pay for relevant
/// commitment transaction fees, with at least this many HTLCs present on the commitment
/// transaction (not counting the value of the HTLCs themselves).
pub(crate) const MIN_AFFORDABLE_HTLC_COUNT: usize = 4;
/// When a [`Channel`] has its [`ChannelConfig`] updated, its existing one is stashed for up to this
/// number of ticks to allow forwarding HTLCs by nodes that have yet to receive the new
/// ChannelUpdate prompted by the config update. This value was determined as follows:
///
///   * The expected interval between ticks (1 minute).
///   * The average convergence delay of updates across the network, i.e., ~300 seconds on average
///     for a node to see an update as seen on `<https://arxiv.org/pdf/2205.12737.pdf>`.
///   * `EXPIRE_PREV_CONFIG_TICKS` = convergence_delay / tick_interval
pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;
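// i.e., ~300 seconds of average network convergence delay / 60 seconds per tick = 5 ticks.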
/// The number of ticks that may elapse while we're waiting for a response to a
/// [`msgs::RevokeAndACK`] or [`msgs::ChannelReestablish`] message before we attempt to disconnect
/// them.
///
/// See [`ChannelContext::sent_message_awaiting_response`] for more information.
pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;
/// The number of ticks that may elapse while we're waiting for an unfunded outbound/inbound channel
/// to be promoted to a [`Channel`] since the unfunded channel was created. An unfunded channel
/// exceeding this age limit will be force-closed and purged from memory.
pub(crate) const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;
/// Number of blocks needed for an output from a coinbase transaction to be spendable.
pub(crate) const COINBASE_MATURITY: u32 = 100;
struct PendingChannelMonitorUpdate {
	update: ChannelMonitorUpdate,
}

impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
	(0, update, required),
});
905 /// its variants containing an appropriate channel struct.
906 pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
907 UnfundedOutboundV1(OutboundV1Channel<SP>),
908 UnfundedInboundV1(InboundV1Channel<SP>),
912 impl<'a, SP: Deref> ChannelPhase<SP> where
913 SP::Target: SignerProvider,
914 <SP::Target as SignerProvider>::EcdsaSigner: ChannelSigner,
916 pub fn context(&'a self) -> &'a ChannelContext<SP> {
918 ChannelPhase::Funded(chan) => &chan.context,
919 ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
920 ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
924 pub fn context_mut(&'a mut self) -> &'a mut ChannelContext<SP> {
926 ChannelPhase::Funded(ref mut chan) => &mut chan.context,
927 ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
928 ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
/// Contains all state common to unfunded inbound/outbound channels.
pub(super) struct UnfundedChannelContext {
	/// A counter tracking how many ticks have elapsed since this unfunded channel was
	/// created. If this unfunded channel reaches `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS`,
	/// it will be force-closed and purged from memory.
	///
	/// This is so that we don't keep channels around that haven't progressed to a funded state
	/// in a timely manner.
	unfunded_channel_age_ticks: usize,
}
impl UnfundedChannelContext {
	/// Determines whether we should force-close and purge this unfunded channel from memory due to it
	/// having reached the unfunded channel age limit.
	///
	/// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
	pub fn should_expire_unfunded_channel(&mut self) -> bool {
		self.unfunded_channel_age_ticks += 1;
		self.unfunded_channel_age_ticks >= UNFUNDED_CHANNEL_AGE_LIMIT_TICKS
	}
}
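// Hypothetical usage sketch (not upstream code): `ChannelManager::timer_tick_occurred` would call
// this once per tick for each unfunded channel and force-close any for which it returns true:
//   if unfunded_context.should_expire_unfunded_channel() { /* force-close and purge */ }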
/// Contains everything about the channel including state, and various flags.
pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
	config: LegacyChannelConfig,

	// Track the previous `ChannelConfig` so that we can continue forwarding HTLCs that were
	// constructed using it. The second element in the tuple corresponds to the number of ticks that
	// have elapsed since the update occurred.
	prev_config: Option<(ChannelConfig, usize)>,

	inbound_handshake_limits_override: Option<ChannelHandshakeLimits>,

	user_id: u128,

	/// The current channel ID.
	channel_id: ChannelId,
	/// The temporary channel ID used during channel setup. Value kept even after transitioning to a final channel ID.
	/// Will be `None` for channels created prior to 0.0.115.
	temporary_channel_id: Option<ChannelId>,
	channel_state: ChannelState,
	// When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
	// our peer. However, we want to make sure they received it, or else rebroadcast it when we
	// next reconnect.
	// We do so here, see `AnnouncementSigsSent` for more details on the state(s).
	// Note that a number of our tests were written prior to the behavior here which retransmits
	// AnnouncementSignatures until after an RAA completes, so the behavior is short-circuited in
	// some tests.
	#[cfg(any(test, feature = "_test_utils"))]
	pub(crate) announcement_sigs_state: AnnouncementSigsState,
	#[cfg(not(any(test, feature = "_test_utils")))]
	announcement_sigs_state: AnnouncementSigsState,
	secp_ctx: Secp256k1<secp256k1::All>,
	channel_value_satoshis: u64,

	latest_monitor_update_id: u64,

	holder_signer: ChannelSignerType<SP>,
	shutdown_scriptpubkey: Option<ShutdownScript>,
	destination_script: ScriptBuf,

	// Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction
	// generation start at 0 and count up...this simplifies some parts of implementation at the
	// cost of others, but should really just be changed.

	cur_holder_commitment_transaction_number: u64,
	cur_counterparty_commitment_transaction_number: u64,
	value_to_self_msat: u64, // Excluding all pending_htlcs, fees, and anchor outputs
	pending_inbound_htlcs: Vec<InboundHTLCOutput>,
	pending_outbound_htlcs: Vec<OutboundHTLCOutput>,
	holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,
	/// When resending CS/RAA messages on channel monitor restoration or on reconnect, we always
	/// need to ensure we resend them in the order we originally generated them. Note that because
	/// there can only ever be one in-flight CS and/or one in-flight RAA at any time, it is
	/// sufficient to simply set this to the opposite of any message we are generating as we
	/// generate it. ie when we generate a CS, we set this to RAAFirst as, if there is a pending
	/// in-flight RAA to resend, it will have been the first thing we generated, and thus we should
	/// resend it first.
	resend_order: RAACommitmentOrder,
	monitor_pending_channel_ready: bool,
	monitor_pending_revoke_and_ack: bool,
	monitor_pending_commitment_signed: bool,

	// TODO: If a channel is drop'd, we don't know whether the `ChannelMonitor` is ultimately
	// responsible for some of the HTLCs here or not - we don't know whether the update in question
	// completed or not. We currently ignore these fields entirely when force-closing a channel,
	// but need to handle this somehow or we run the risk of losing HTLCs!
	monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
	monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
	monitor_pending_finalized_fulfills: Vec<HTLCSource>,
	/// If we went to send a commitment update (ie some messages then [`msgs::CommitmentSigned`])
	/// but our signer (initially) refused to give us a signature, we should retry at some point in
	/// the future when the signer indicates it may have a signature for us.
	///
	/// This flag is set in such a case. Note that we don't need to persist this as we'll end up
	/// setting it again as a side-effect of [`Channel::channel_reestablish`].
	signer_pending_commitment_update: bool,
	/// Similar to [`Self::signer_pending_commitment_update`] but we're waiting to send either a
	/// [`msgs::FundingCreated`] or [`msgs::FundingSigned`] depending on if this channel is
	/// outbound or inbound.
	signer_pending_funding: bool,
	// pending_update_fee is filled when sending and receiving update_fee.
	//
	// Because it follows the same commitment flow as HTLCs, `FeeUpdateState` is either `Outbound`
	// or matches a subset of the `InboundHTLCOutput` variants. It is then updated/used when
	// generating new commitment transactions with exactly the same criteria as inbound/outbound
	// HTLCs with similar state.
	pending_update_fee: Option<(u32, FeeUpdateState)>,
	// If a `send_update_fee()` call is made with ChannelState::AwaitingRemoteRevoke set, we place
	// it here instead of `pending_update_fee` in the same way as we place outbound HTLC updates in
	// `holding_cell_htlc_updates` instead of `pending_outbound_htlcs`. It is released into
	// `pending_update_fee` with the same criteria as outbound HTLC updates but can be updated by
	// further `send_update_fee` calls, dropping the previous holding cell update entirely.
	holding_cell_update_fee: Option<u32>,
	next_holder_htlc_id: u64,
	next_counterparty_htlc_id: u64,
	feerate_per_kw: u32,

	/// The timestamp set on our latest `channel_update` message for this channel. It is updated
	/// when the channel is updated in ways which may impact the `channel_update` message or when a
	/// new block is received, ensuring it's always at least moderately close to the current real
	/// time.
	update_time_counter: u32,
	#[cfg(debug_assertions)]
	/// Max to_local and to_remote outputs in a locally-generated commitment transaction
	holder_max_commitment_tx_output: Mutex<(u64, u64)>,
	#[cfg(debug_assertions)]
	/// Max to_local and to_remote outputs in a remote-generated commitment transaction
	counterparty_max_commitment_tx_output: Mutex<(u64, u64)>,
	last_sent_closing_fee: Option<(u64, Signature)>, // (fee, holder_sig)
	target_closing_feerate_sats_per_kw: Option<u32>,

	/// If our counterparty sent us a closing_signed while we were waiting for a `ChannelMonitor`
	/// update, we need to delay processing it until later. We do that here by simply storing the
	/// closing_signed message and handling it in `maybe_propose_closing_signed`.
	pending_counterparty_closing_signed: Option<msgs::ClosingSigned>,
	/// The minimum and maximum absolute fee, in satoshis, we are willing to place on the closing
	/// transaction. These are set once we reach `closing_negotiation_ready`.
	#[cfg(test)]
	pub(crate) closing_fee_limits: Option<(u64, u64)>,
	#[cfg(not(test))]
	closing_fee_limits: Option<(u64, u64)>,
	/// If we remove an HTLC (or fee update), commit, and receive our counterparty's
	/// `revoke_and_ack`, we remove all knowledge of said HTLC (or fee update). However, the latest
	/// local commitment transaction that we can broadcast still contains the HTLC (or old fee)
	/// until we receive a further `commitment_signed`. Thus we are not eligible for initiating the
	/// `closing_signed` negotiation if we're expecting a counterparty `commitment_signed`.
	///
	/// To ensure we don't send a `closing_signed` too early, we track this state here, waiting
	/// until we see a `commitment_signed` before doing so.
	///
	/// We don't bother to persist this - we anticipate this state won't last longer than a few
	/// milliseconds, so any accidental force-closes here should be exceedingly rare.
	expecting_peer_commitment_signed: bool,
	/// The hash of the block in which the funding transaction was included.
	funding_tx_confirmed_in: Option<BlockHash>,
	funding_tx_confirmation_height: u32,
	short_channel_id: Option<u64>,
	/// Either the height at which this channel was created or the height at which it was last
	/// serialized if it was serialized by versions prior to 0.0.103.
	/// We use this to close if funding is never broadcasted.
	channel_creation_height: u32,
	counterparty_dust_limit_satoshis: u64,

	#[cfg(test)]
	pub(super) holder_dust_limit_satoshis: u64,
	#[cfg(not(test))]
	holder_dust_limit_satoshis: u64,

	#[cfg(test)]
	pub(super) counterparty_max_htlc_value_in_flight_msat: u64,
	#[cfg(not(test))]
	counterparty_max_htlc_value_in_flight_msat: u64,

	#[cfg(test)]
	pub(super) holder_max_htlc_value_in_flight_msat: u64,
	#[cfg(not(test))]
	holder_max_htlc_value_in_flight_msat: u64,

	/// minimum channel reserve for self to maintain - set by them.
	counterparty_selected_channel_reserve_satoshis: Option<u64>,

	#[cfg(test)]
	pub(super) holder_selected_channel_reserve_satoshis: u64,
	#[cfg(not(test))]
	holder_selected_channel_reserve_satoshis: u64,

	counterparty_htlc_minimum_msat: u64,
	holder_htlc_minimum_msat: u64,
	#[cfg(test)]
	pub counterparty_max_accepted_htlcs: u16,
	#[cfg(not(test))]
	counterparty_max_accepted_htlcs: u16,
	holder_max_accepted_htlcs: u16,
	minimum_depth: Option<u32>,
	counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,

	pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
	funding_transaction: Option<Transaction>,
	is_batch_funding: Option<()>,

	counterparty_cur_commitment_point: Option<PublicKey>,
	counterparty_prev_commitment_point: Option<PublicKey>,
	counterparty_node_id: PublicKey,

	counterparty_shutdown_scriptpubkey: Option<ScriptBuf>,

	commitment_secrets: CounterpartyCommitmentSecrets,

	channel_update_status: ChannelUpdateStatus,
	/// Once we reach `closing_negotiation_ready`, we set this, indicating if closing_signed does
	/// not complete within a single timer tick (one minute), we should force-close the channel.
	/// This prevents us from keeping unusable channels around forever if our counterparty wishes
	/// to stall the closing negotiation indefinitely.
	/// Note that this field is reset to false on deserialization to give us a chance to connect to
	/// our peer and start the closing_signed negotiation fresh.
	closing_signed_in_flight: bool,
	/// Our counterparty's channel_announcement signatures provided in announcement_signatures.
	/// This can be used to rebroadcast the channel_announcement message later.
	announcement_sigs: Option<(Signature, Signature)>,
	// We save these values so we can make sure `next_local_commit_tx_fee_msat` and
	// `next_remote_commit_tx_fee_msat` properly predict what the next commitment transaction fee will
	// be, by comparing the cached values to the fee of the transaction generated by
	// `build_commitment_transaction`.
	#[cfg(any(test, fuzzing))]
	next_local_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
	#[cfg(any(test, fuzzing))]
	next_remote_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
	/// lnd has a long-standing bug where, upon reconnection, if the channel is not yet confirmed
	/// they will not send a channel_reestablish until the channel locks in. Then, they will send a
	/// channel_ready *before* sending the channel_reestablish (which is clearly a violation of
	/// the BOLT specs). We copy c-lightning's workaround here and simply store the channel_ready
	/// message until we receive a channel_reestablish.
	///
	/// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
	pub workaround_lnd_bug_4006: Option<msgs::ChannelReady>,
	/// An option set when we wish to track how many ticks have elapsed while waiting for a response
	/// from our counterparty after sending a message. If the peer has yet to respond after reaching
	/// `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`, a reconnection should be attempted to try to
	/// unblock the state machine.
	///
	/// This behavior is mostly motivated by a lnd bug in which we don't receive a message we expect
	/// to in a timely manner, which may lead to channels becoming unusable and/or force-closed. An
	/// example of such can be found at <https://github.com/lightningnetwork/lnd/issues/7682>.
	///
	/// This is currently only used when waiting for a [`msgs::ChannelReestablish`] or
	/// [`msgs::RevokeAndACK`] message from the counterparty.
	sent_message_awaiting_response: Option<usize>,
	#[cfg(any(test, fuzzing))]
	// When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
	// corresponding HTLC on the inbound path. If, then, the outbound path channel is
	// disconnected and reconnected (before we've exchanged commitment_signed and revoke_and_ack
	// messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This
	// is fine, but as a sanity check in our failure to generate the second claim, we check here
	// that the original was a claim, and that we aren't now trying to fulfill a failed HTLC.
	historical_inbound_htlc_fulfills: HashSet<u64>,

	/// This channel's type, as negotiated during channel open
	channel_type: ChannelTypeFeatures,
	// Our counterparty can offer us SCID aliases which they will map to this channel when routing
	// outbound payments. These can be used in invoice route hints to avoid explicitly revealing
	// the channel's funding UTXO.
	//
	// We also use this when sending our peer a channel_update that isn't to be broadcasted
	// publicly - allowing them to re-use their map of SCID -> channel for channel_update ->
	// associated channel mapping.
	//
	// We only bother storing the most recent SCID alias at any time, though our counterparty has
	// to store all of them.
	latest_inbound_scid_alias: Option<u64>,

	// We always offer our counterparty a static SCID alias, which we recognize as for this channel
	// if we see it in HTLC forwarding instructions. We don't bother rotating the alias given we
	// don't currently support node id aliases and eventually privacy should be provided with
	// blinded paths instead of simple scid+node_id aliases.
	outbound_scid_alias: u64,
	// We track whether we already emitted a `ChannelPending` event.
	channel_pending_event_emitted: bool,

	// We track whether we already emitted a `ChannelReady` event.
	channel_ready_event_emitted: bool,

	/// The unique identifier used to re-derive the private key material for the channel through
	/// [`SignerProvider::derive_channel_signer`].
	channel_keys_id: [u8; 32],

	/// If we can't release a [`ChannelMonitorUpdate`] until some external action completes, we
	/// store it here and only release it to the `ChannelManager` once it asks for it.
	blocked_monitor_updates: Vec<PendingChannelMonitorUpdate>,
}
impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
	/// Allowed in any state (including after shutdown)
	pub fn get_update_time_counter(&self) -> u32 {
		self.update_time_counter
	}

	pub fn get_latest_monitor_update_id(&self) -> u64 {
		self.latest_monitor_update_id
	}

	pub fn should_announce(&self) -> bool {
		self.config.announced_channel
	}

	pub fn is_outbound(&self) -> bool {
		self.channel_transaction_parameters.is_outbound_from_holder
	}

	/// Gets the fee we'd want to charge for adding an HTLC output to this Channel
	/// Allowed in any state (including after shutdown)
	pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 {
		self.config.options.forwarding_fee_base_msat
	}

	/// Returns true if we've ever received a message from the remote end for this Channel
	pub fn have_received_message(&self) -> bool {
		self.channel_state > ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT)
	}

	/// Returns true if this channel is fully established and not known to be closing.
	/// Allowed in any state (including after shutdown)
	pub fn is_usable(&self) -> bool {
		matches!(self.channel_state, ChannelState::ChannelReady(_)) &&
			!self.channel_state.is_local_shutdown_sent() &&
			!self.channel_state.is_remote_shutdown_sent() &&
			!self.monitor_pending_channel_ready
	}
1282	/// Returns the state of the channel as it advances through the various stages of shutdown.
1283 pub fn shutdown_state(&self) -> ChannelShutdownState {
1284 match self.channel_state {
1285 ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_) =>
1286 if self.channel_state.is_local_shutdown_sent() && !self.channel_state.is_remote_shutdown_sent() {
1287 ChannelShutdownState::ShutdownInitiated
1288 } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && !self.closing_negotiation_ready() {
1289 ChannelShutdownState::ResolvingHTLCs
1290 } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && self.closing_negotiation_ready() {
1291 ChannelShutdownState::NegotiatingClosingFee
1293 ChannelShutdownState::NotShuttingDown
1295 ChannelState::ShutdownComplete => ChannelShutdownState::ShutdownComplete,
1296 _ => ChannelShutdownState::NotShuttingDown,
1300 fn closing_negotiation_ready(&self) -> bool {
1301 let is_ready_to_close = match self.channel_state {
1302 ChannelState::AwaitingChannelReady(flags) =>
1303 flags & FundedStateFlags::ALL == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
1304 ChannelState::ChannelReady(flags) =>
1305	flags == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
1306	_ => false,
1307	};
1308	self.pending_inbound_htlcs.is_empty() &&
1309	self.pending_outbound_htlcs.is_empty() &&
1310	self.pending_update_fee.is_none() &&
1311	is_ready_to_close
1314 /// Returns true if this channel is currently available for use. This is a superset of
1315 /// is_usable() and considers things like the channel being temporarily disabled.
1316 /// Allowed in any state (including after shutdown)
1317 pub fn is_live(&self) -> bool {
1318 self.is_usable() && !self.channel_state.is_peer_disconnected()
1321 // Public utilities:
1323 pub fn channel_id(&self) -> ChannelId {
1327 // Return the `temporary_channel_id` used during channel establishment.
1329 // Will return `None` for channels created prior to LDK version 0.0.115.
1330 pub fn temporary_channel_id(&self) -> Option<ChannelId> {
1331 self.temporary_channel_id
1334 pub fn minimum_depth(&self) -> Option<u32> {
1338 /// Gets the "user_id" value passed into the construction of this channel. It has no special
1339 /// meaning and exists only to allow users to have a persistent identifier of a channel.
1340 pub fn get_user_id(&self) -> u128 {
1344 /// Gets the channel's type
1345 pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
1349 /// Gets the channel's `short_channel_id`.
1351 /// Will return `None` if the channel hasn't been confirmed yet.
1352 pub fn get_short_channel_id(&self) -> Option<u64> {
1353 self.short_channel_id
1356 /// Allowed in any state (including after shutdown)
1357 pub fn latest_inbound_scid_alias(&self) -> Option<u64> {
1358 self.latest_inbound_scid_alias
1361 /// Allowed in any state (including after shutdown)
1362 pub fn outbound_scid_alias(&self) -> u64 {
1363 self.outbound_scid_alias
1366 /// Returns the holder signer for this channel.
1368 pub fn get_signer(&self) -> &ChannelSignerType<SP> {
1369 return &self.holder_signer
1372	/// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
1373	/// indicating either that we were written by LDK prior to 0.0.106 (which did not set outbound
1374	/// SCID aliases) or that no channel actions have yet occurred during `Channel` initialization.
1375 pub fn set_outbound_scid_alias(&mut self, outbound_scid_alias: u64) {
1376 debug_assert_eq!(self.outbound_scid_alias, 0);
1377 self.outbound_scid_alias = outbound_scid_alias;
1380 /// Returns the funding_txo we either got from our peer, or were given by
1381 /// get_funding_created.
1382 pub fn get_funding_txo(&self) -> Option<OutPoint> {
1383 self.channel_transaction_parameters.funding_outpoint
1386 /// Returns the height in which our funding transaction was confirmed.
1387 pub fn get_funding_tx_confirmation_height(&self) -> Option<u32> {
1388 let conf_height = self.funding_tx_confirmation_height;
1389 if conf_height > 0 {
1396 /// Returns the block hash in which our funding transaction was confirmed.
1397 pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
1398 self.funding_tx_confirmed_in
1401 /// Returns the current number of confirmations on the funding transaction.
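/// For example (illustrative numbers, not from the source): if the funding transaction
/// confirmed at height 100 and `height` is 104, this returns 104 - 100 + 1 = 5 confirmations.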
1402 pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
1403 if self.funding_tx_confirmation_height == 0 {
1404 // We either haven't seen any confirmation yet, or observed a reorg.
1408 height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1)
1411 fn get_holder_selected_contest_delay(&self) -> u16 {
1412 self.channel_transaction_parameters.holder_selected_contest_delay
1415 fn get_holder_pubkeys(&self) -> &ChannelPublicKeys {
1416 &self.channel_transaction_parameters.holder_pubkeys
1419 pub fn get_counterparty_selected_contest_delay(&self) -> Option<u16> {
1420 self.channel_transaction_parameters.counterparty_parameters
1421 .as_ref().map(|params| params.selected_contest_delay)
1424 fn get_counterparty_pubkeys(&self) -> &ChannelPublicKeys {
1425 &self.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys
1428 /// Allowed in any state (including after shutdown)
1429 pub fn get_counterparty_node_id(&self) -> PublicKey {
1430 self.counterparty_node_id
1433 /// Allowed in any state (including after shutdown)
1434 pub fn get_holder_htlc_minimum_msat(&self) -> u64 {
1435 self.holder_htlc_minimum_msat
1438	/// Allowed in any state (including after shutdown), but will return `None` before TheirInitSent
1439 pub fn get_holder_htlc_maximum_msat(&self) -> Option<u64> {
1440 self.get_htlc_maximum_msat(self.holder_max_htlc_value_in_flight_msat)
1443 /// Allowed in any state (including after shutdown)
1444 pub fn get_announced_htlc_max_msat(&self) -> u64 {
1446	// Upper bound by capacity. We make it slightly less than full capacity to prevent attempts
1447	// to use the full capacity. This is an effort to reduce routing failures, because in many
1448	// cases a channel might be used to route very small values (either by honest users or as DoS).
1449 self.channel_value_satoshis * 1000 * 9 / 10,
1451 self.counterparty_max_htlc_value_in_flight_msat
1455 /// Allowed in any state (including after shutdown)
1456 pub fn get_counterparty_htlc_minimum_msat(&self) -> u64 {
1457 self.counterparty_htlc_minimum_msat
1460	/// Allowed in any state (including after shutdown), but will return `None` before TheirInitSent
1461 pub fn get_counterparty_htlc_maximum_msat(&self) -> Option<u64> {
1462 self.get_htlc_maximum_msat(self.counterparty_max_htlc_value_in_flight_msat)
1465 fn get_htlc_maximum_msat(&self, party_max_htlc_value_in_flight_msat: u64) -> Option<u64> {
1466 self.counterparty_selected_channel_reserve_satoshis.map(|counterparty_reserve| {
1467 let holder_reserve = self.holder_selected_channel_reserve_satoshis;
1469 (self.channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000,
1470 party_max_htlc_value_in_flight_msat
1475 pub fn get_value_satoshis(&self) -> u64 {
1476 self.channel_value_satoshis
1479 pub fn get_fee_proportional_millionths(&self) -> u32 {
1480 self.config.options.forwarding_fee_proportional_millionths
1483 pub fn get_cltv_expiry_delta(&self) -> u16 {
1484 cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
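// Note: a configured `cltv_expiry_delta` below `MIN_CLTV_EXPIRY_DELTA` is silently raised
// to that floor here, so callers always observe at least the minimum delta.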
1487 pub fn get_max_dust_htlc_exposure_msat<F: Deref>(&self,
1488 fee_estimator: &LowerBoundedFeeEstimator<F>) -> u64
1489 where F::Target: FeeEstimator
1491 match self.config.options.max_dust_htlc_exposure {
1492 MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => {
1493 let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(
1494 ConfirmationTarget::OnChainSweep) as u64;
1495 feerate_per_kw.saturating_mul(multiplier)
1497 MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
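// Illustrative example (numbers assumed, not from the source): with
// `MaxDustHTLCExposure::FeeRateMultiplier(5_000)` and an estimator returning
// 2_500 sat/kW for `ConfirmationTarget::OnChainSweep`, the resulting exposure limit is
// 2_500 * 5_000 = 12_500_000 msat.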
1501 /// Returns the previous [`ChannelConfig`] applied to this channel, if any.
1502 pub fn prev_config(&self) -> Option<ChannelConfig> {
1503 self.prev_config.map(|prev_config| prev_config.0)
1506 // Checks whether we should emit a `ChannelPending` event.
1507 pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
1508 self.is_funding_broadcast() && !self.channel_pending_event_emitted
1511 // Returns whether we already emitted a `ChannelPending` event.
1512 pub(crate) fn channel_pending_event_emitted(&self) -> bool {
1513 self.channel_pending_event_emitted
1516 // Remembers that we already emitted a `ChannelPending` event.
1517 pub(crate) fn set_channel_pending_event_emitted(&mut self) {
1518 self.channel_pending_event_emitted = true;
1521 // Checks whether we should emit a `ChannelReady` event.
1522 pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool {
1523 self.is_usable() && !self.channel_ready_event_emitted
1526 // Remembers that we already emitted a `ChannelReady` event.
1527 pub(crate) fn set_channel_ready_event_emitted(&mut self) {
1528 self.channel_ready_event_emitted = true;
1531 /// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once
1532 /// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will
1533 /// no longer be considered when forwarding HTLCs.
1534 pub fn maybe_expire_prev_config(&mut self) {
1535 if self.prev_config.is_none() {
1538 let prev_config = self.prev_config.as_mut().unwrap();
1540 if prev_config.1 == EXPIRE_PREV_CONFIG_TICKS {
1541 self.prev_config = None;
1545 /// Returns the current [`ChannelConfig`] applied to the channel.
1546 pub fn config(&self) -> ChannelConfig {
1550	/// Updates the channel's config. Returns a bool indicating whether the applied config
1551	/// update requires generating a new ChannelUpdate message.
1552 pub fn update_config(&mut self, config: &ChannelConfig) -> bool {
1553 let did_channel_update =
1554 self.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths ||
1555 self.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat ||
1556 self.config.options.cltv_expiry_delta != config.cltv_expiry_delta;
1557 if did_channel_update {
1558 self.prev_config = Some((self.config.options, 0));
1559 // Update the counter, which backs the ChannelUpdate timestamp, to allow the relay
1560 // policy change to propagate throughout the network.
1561 self.update_time_counter += 1;
1563 self.config.options = *config;
1567 /// Returns true if funding_signed was sent/received and the
1568 /// funding transaction has been broadcast if necessary.
1569 pub fn is_funding_broadcast(&self) -> bool {
1570 !self.channel_state.is_pre_funded_state() &&
1571 !matches!(self.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH))
1574 /// Transaction nomenclature is somewhat confusing here as there are many different cases - a
1575 /// transaction is referred to as "a's transaction" implying that a will be able to broadcast
1576 /// the transaction. Thus, b will generally be sending a signature over such a transaction to
1577 /// a, and a can revoke the transaction by providing b the relevant per_commitment_secret. As
1578	/// such, a transaction is generally the result of b increasing the amount paid to a (or adding an HTLC to it).
1580 /// @local is used only to convert relevant internal structures which refer to remote vs local
1581 /// to decide value of outputs and direction of HTLCs.
1582 /// @generated_by_local is used to determine *which* HTLCs to include - noting that the HTLC
1583 /// state may indicate that one peer has informed the other that they'd like to add an HTLC but
1584 /// have not yet committed it. Such HTLCs will only be included in transactions which are being
1585 /// generated by the peer which proposed adding the HTLCs, and thus we need to understand both
1586 /// which peer generated this transaction and "to whom" this transaction flows.
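/// For instance, when building the transaction our counterparty can broadcast from a
/// `commitment_signed` we are about to send, this is called with `local = false,
/// generated_by_local = true`; when validating a `commitment_signed` they sent us, it is
/// called with `local = true, generated_by_local = false`.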
1588 fn build_commitment_transaction<L: Deref>(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> CommitmentStats
1589 where L::Target: Logger
1591 let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new();
1592 let num_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len();
1593 let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs);
1595 let broadcaster_dust_limit_satoshis = if local { self.holder_dust_limit_satoshis } else { self.counterparty_dust_limit_satoshis };
1596 let mut remote_htlc_total_msat = 0;
1597 let mut local_htlc_total_msat = 0;
1598 let mut value_to_self_msat_offset = 0;
1600 let mut feerate_per_kw = self.feerate_per_kw;
1601 if let Some((feerate, update_state)) = self.pending_update_fee {
1602 if match update_state {
1603 // Note that these match the inclusion criteria when scanning
1604 // pending_inbound_htlcs below.
1605 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); !generated_by_local },
1606 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); !generated_by_local },
1607 FeeUpdateState::Outbound => { assert!(self.is_outbound()); generated_by_local },
1609 feerate_per_kw = feerate;
1613 log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...",
1614 commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number),
1615 get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()),
1617 if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
1619 macro_rules! get_htlc_in_commitment {
1620 ($htlc: expr, $offered: expr) => {
1621 HTLCOutputInCommitment {
1623 amount_msat: $htlc.amount_msat,
1624 cltv_expiry: $htlc.cltv_expiry,
1625 payment_hash: $htlc.payment_hash,
1626 transaction_output_index: None
1631 macro_rules! add_htlc_output {
1632 ($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => {
1633 if $outbound == local { // "offered HTLC output"
1634 let htlc_in_tx = get_htlc_in_commitment!($htlc, true);
1635 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1638 feerate_per_kw as u64 * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
1640 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1641 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1642 included_non_dust_htlcs.push((htlc_in_tx, $source));
1644 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1645 included_dust_htlcs.push((htlc_in_tx, $source));
1648 let htlc_in_tx = get_htlc_in_commitment!($htlc, false);
1649 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1652 feerate_per_kw as u64 * htlc_success_tx_weight(self.get_channel_type()) / 1000
1654 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1655 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1656 included_non_dust_htlcs.push((htlc_in_tx, $source));
1658 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1659 included_dust_htlcs.push((htlc_in_tx, $source));
1665 let mut inbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
1667 for ref htlc in self.pending_inbound_htlcs.iter() {
1668 let (include, state_name) = match htlc.state {
1669 InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"),
1670 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => (!generated_by_local, "AwaitingRemoteRevokeToAnnounce"),
1671 InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => (true, "AwaitingAnnouncedRemoteRevoke"),
1672 InboundHTLCState::Committed => (true, "Committed"),
1673 InboundHTLCState::LocalRemoved(_) => (!generated_by_local, "LocalRemoved"),
1677 add_htlc_output!(htlc, false, None, state_name);
1678 remote_htlc_total_msat += htlc.amount_msat;
1680 log_trace!(logger, " ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1682 &InboundHTLCState::LocalRemoved(ref reason) => {
1683 if generated_by_local {
1684 if let &InboundHTLCRemovalReason::Fulfill(preimage) = reason {
1685 inbound_htlc_preimages.push(preimage);
1686 value_to_self_msat_offset += htlc.amount_msat as i64;
1696 let mut outbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
1698 for ref htlc in self.pending_outbound_htlcs.iter() {
1699 let (include, state_name) = match htlc.state {
1700 OutboundHTLCState::LocalAnnounced(_) => (generated_by_local, "LocalAnnounced"),
1701 OutboundHTLCState::Committed => (true, "Committed"),
1702 OutboundHTLCState::RemoteRemoved(_) => (generated_by_local, "RemoteRemoved"),
1703 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) => (generated_by_local, "AwaitingRemoteRevokeToRemove"),
1704 OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) => (false, "AwaitingRemovedRemoteRevoke"),
1707 let preimage_opt = match htlc.state {
1708 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(p)) => p,
1709 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(p)) => p,
1710 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(p)) => p,
1714 if let Some(preimage) = preimage_opt {
1715 outbound_htlc_preimages.push(preimage);
1719 add_htlc_output!(htlc, true, Some(&htlc.source), state_name);
1720 local_htlc_total_msat += htlc.amount_msat;
1722 log_trace!(logger, " ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1724 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_))|OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => {
1725 value_to_self_msat_offset -= htlc.amount_msat as i64;
1727 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(_)) => {
1728 if !generated_by_local {
1729 value_to_self_msat_offset -= htlc.amount_msat as i64;
1737 let value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
1738 assert!(value_to_self_msat >= 0);
1739 // Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie
1740 // AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
1741 // "violate" their reserve value by couting those against it. Thus, we have to convert
1742 // everything to i64 before subtracting as otherwise we can overflow.
1743 let value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
1744 assert!(value_to_remote_msat >= 0);
1746 #[cfg(debug_assertions)]
1748 // Make sure that the to_self/to_remote is always either past the appropriate
1749 // channel_reserve *or* it is making progress towards it.
1750 let mut broadcaster_max_commitment_tx_output = if generated_by_local {
1751 self.holder_max_commitment_tx_output.lock().unwrap()
1753 self.counterparty_max_commitment_tx_output.lock().unwrap()
1755 debug_assert!(broadcaster_max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= self.counterparty_selected_channel_reserve_satoshis.unwrap() as i64);
1756 broadcaster_max_commitment_tx_output.0 = cmp::max(broadcaster_max_commitment_tx_output.0, value_to_self_msat as u64);
1757 debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= self.holder_selected_channel_reserve_satoshis as i64);
1758 broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64);
1761 let total_fee_sat = commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), &self.channel_transaction_parameters.channel_type_features);
1762 let anchors_val = if self.channel_transaction_parameters.channel_type_features.supports_anchors_zero_fee_htlc_tx() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64;
1763 let (value_to_self, value_to_remote) = if self.is_outbound() {
1764 (value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000)
1766 (value_to_self_msat / 1000, value_to_remote_msat / 1000 - anchors_val - total_fee_sat as i64)
1769 let mut value_to_a = if local { value_to_self } else { value_to_remote };
1770 let mut value_to_b = if local { value_to_remote } else { value_to_self };
1771 let (funding_pubkey_a, funding_pubkey_b) = if local {
1772 (self.get_holder_pubkeys().funding_pubkey, self.get_counterparty_pubkeys().funding_pubkey)
1774 (self.get_counterparty_pubkeys().funding_pubkey, self.get_holder_pubkeys().funding_pubkey)
1777 if value_to_a >= (broadcaster_dust_limit_satoshis as i64) {
1778 log_trace!(logger, " ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a);
1783 if value_to_b >= (broadcaster_dust_limit_satoshis as i64) {
1784 log_trace!(logger, " ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b);
1789 let num_nondust_htlcs = included_non_dust_htlcs.len();
1791 let channel_parameters =
1792 if local { self.channel_transaction_parameters.as_holder_broadcastable() }
1793 else { self.channel_transaction_parameters.as_counterparty_broadcastable() };
1794 let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number,
1801 &mut included_non_dust_htlcs,
1804 let mut htlcs_included = included_non_dust_htlcs;
1805 // The unwrap is safe, because all non-dust HTLCs have been assigned an output index
1806 htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
1807 htlcs_included.append(&mut included_dust_htlcs);
1815 local_balance_msat: value_to_self_msat as u64,
1816 remote_balance_msat: value_to_remote_msat as u64,
1817 inbound_htlc_preimages,
1818 outbound_htlc_preimages,
1823 /// Creates a set of keys for build_commitment_transaction to generate a transaction which our
1824 /// counterparty will sign (ie DO NOT send signatures over a transaction created by this to
1825 /// our counterparty!)
1826 /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
1827 /// TODO Some magic rust shit to compile-time check this?
1828 fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
1829 let per_commitment_point = self.holder_signer.as_ref().get_per_commitment_point(commitment_number, &self.secp_ctx);
1830 let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
1831 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1832 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1834 TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint)
1838	/// Creates a set of keys for build_commitment_transaction to generate a transaction which we
1839	/// will sign and send to our counterparty.
1841 fn build_remote_transaction_keys(&self) -> TxCreationKeys {
1842 let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
1843 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1844 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1846 TxCreationKeys::derive_new(&self.secp_ctx, &self.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint)
1849 /// Gets the redeemscript for the funding transaction output (ie the funding transaction output
1850 /// pays to get_funding_redeemscript().to_v0_p2wsh()).
1851 /// Panics if called before accept_channel/InboundV1Channel::new
1852 pub fn get_funding_redeemscript(&self) -> ScriptBuf {
1853 make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
1856 fn counterparty_funding_pubkey(&self) -> &PublicKey {
1857 &self.get_counterparty_pubkeys().funding_pubkey
1860 pub fn get_feerate_sat_per_1000_weight(&self) -> u32 {
1864 pub fn get_dust_buffer_feerate(&self, outbound_feerate_update: Option<u32>) -> u32 {
1865 // When calculating our exposure to dust HTLCs, we assume that the channel feerate
1866	// may, at any point, increase by at least 10 sat/vB (i.e. 2530 sat/kWU) or 25%,
1867 // whichever is higher. This ensures that we aren't suddenly exposed to significantly
1868 // more dust balance if the feerate increases when we have several HTLCs pending
1869 // which are near the dust limit.
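// Illustrative examples (numbers assumed): a current feerate of 8_000 sat/kW buffers to
// max(2530, 8_000 * 1250 / 1000) = 10_000 sat/kW, while a low feerate of 1_000 sat/kW is
// floored at 2_530 sat/kW.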
1870 let mut feerate_per_kw = self.feerate_per_kw;
1871 // If there's a pending update fee, use it to ensure we aren't under-estimating
1872 // potential feerate updates coming soon.
1873 if let Some((feerate, _)) = self.pending_update_fee {
1874 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1876 if let Some(feerate) = outbound_feerate_update {
1877 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1879 let feerate_plus_quarter = feerate_per_kw.checked_mul(1250).map(|v| v / 1000);
1880 cmp::max(2530, feerate_plus_quarter.unwrap_or(u32::max_value()))
1883 /// Get forwarding information for the counterparty.
1884 pub fn counterparty_forwarding_info(&self) -> Option<CounterpartyForwardingInfo> {
1885 self.counterparty_forwarding_info.clone()
1888	/// Returns an HTLCStats summarizing the pending inbound HTLCs.
1889 fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1891 let mut stats = HTLCStats {
1892 pending_htlcs: context.pending_inbound_htlcs.len() as u32,
1893 pending_htlcs_value_msat: 0,
1894 on_counterparty_tx_dust_exposure_msat: 0,
1895 on_holder_tx_dust_exposure_msat: 0,
1896 holding_cell_msat: 0,
1897 on_holder_tx_holding_cell_htlcs_count: 0,
1900 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1903 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1904 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1905 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1907 let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
1908 let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
1909 for ref htlc in context.pending_inbound_htlcs.iter() {
1910 stats.pending_htlcs_value_msat += htlc.amount_msat;
1911 if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
1912 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1914 if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
1915 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1921	/// Returns an HTLCStats summarizing the pending outbound HTLCs, *including* pending adds in our holding cell.
1922 fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1924 let mut stats = HTLCStats {
1925 pending_htlcs: context.pending_outbound_htlcs.len() as u32,
1926 pending_htlcs_value_msat: 0,
1927 on_counterparty_tx_dust_exposure_msat: 0,
1928 on_holder_tx_dust_exposure_msat: 0,
1929 holding_cell_msat: 0,
1930 on_holder_tx_holding_cell_htlcs_count: 0,
1933 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1936 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1937 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1938 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1940 let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
1941 let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
1942 for ref htlc in context.pending_outbound_htlcs.iter() {
1943 stats.pending_htlcs_value_msat += htlc.amount_msat;
1944 if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
1945 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1947 if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
1948 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1952 for update in context.holding_cell_htlc_updates.iter() {
1953 if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
1954 stats.pending_htlcs += 1;
1955 stats.pending_htlcs_value_msat += amount_msat;
1956 stats.holding_cell_msat += amount_msat;
1957 if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
1958 stats.on_counterparty_tx_dust_exposure_msat += amount_msat;
1960 if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
1961 stats.on_holder_tx_dust_exposure_msat += amount_msat;
1963 stats.on_holder_tx_holding_cell_htlcs_count += 1;
1970 /// Get the available balances, see [`AvailableBalances`]'s fields for more info.
1971 /// Doesn't bother handling the
1972 /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
1973 /// corner case properly.
1974 pub fn get_available_balances<F: Deref>(&self, fee_estimator: &LowerBoundedFeeEstimator<F>)
1975 -> AvailableBalances
1976 where F::Target: FeeEstimator
1978 let context = &self;
1979 // Note that we have to handle overflow due to the above case.
1980 let inbound_stats = context.get_inbound_pending_htlc_stats(None);
1981 let outbound_stats = context.get_outbound_pending_htlc_stats(None);
1983 let mut balance_msat = context.value_to_self_msat;
1984 for ref htlc in context.pending_inbound_htlcs.iter() {
1985 if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
1986 balance_msat += htlc.amount_msat;
1989 balance_msat -= outbound_stats.pending_htlcs_value_msat;
1991 let outbound_capacity_msat = context.value_to_self_msat
1992 .saturating_sub(outbound_stats.pending_htlcs_value_msat)
1994 context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);
1996 let mut available_capacity_msat = outbound_capacity_msat;
1998 let anchor_outputs_value_msat = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1999 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
2003 if context.is_outbound() {
2004 // We should mind channel commit tx fee when computing how much of the available capacity
2005 // can be used in the next htlc. Mirrors the logic in send_htlc.
2007	// The fee depends on whether the amount we will be sending is above dust or not,
2008	// and the answer will in turn change the amount itself, making it a circular
2009	// dependency.
2010	// This complicates the computation around dust-values, up to the one-htlc-value.
2011 let mut real_dust_limit_timeout_sat = context.holder_dust_limit_satoshis;
2012 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2013 real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000;
2016 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
2017 let mut max_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
2018 let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered);
2019 let mut min_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_dust, Some(()));
2020 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2021 max_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
2022 min_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
2025 // We will first subtract the fee as if we were above-dust. Then, if the resulting
2026 // value ends up being below dust, we have this fee available again. In that case,
2027 // match the value to right-below-dust.
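// Worked example (all numbers assumed, not from the source): with a real dust limit of
// 1_000 sat (1_000_000 msat), 1_500_000 msat available, an above-dust fee reserve of
// 800_000 msat and a dust-level fee reserve of 600_000 msat: 1_500_000 - 800_000 =
// 700_000 msat falls below the dust limit, so we add back the 200_000 msat difference
// and cap at 999_999 msat, yielding 900_000 msat, exactly 1_500_000 - 600_000.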
2028 let mut capacity_minus_commitment_fee_msat: i64 = available_capacity_msat as i64 -
2029 max_reserved_commit_tx_fee_msat as i64 - anchor_outputs_value_msat as i64;
2030 if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 {
2031 let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
2032 debug_assert!(one_htlc_difference_msat != 0);
2033 capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64;
2034 capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat);
2035 available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64;
2037 available_capacity_msat = capacity_minus_commitment_fee_msat as u64;
2040 // If the channel is inbound (i.e. counterparty pays the fee), we need to make sure
2041 // sending a new HTLC won't reduce their balance below our reserve threshold.
2042 let mut real_dust_limit_success_sat = context.counterparty_dust_limit_satoshis;
2043 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2044 real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000;
2047 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered);
2048 let max_reserved_commit_tx_fee_msat = context.next_remote_commit_tx_fee_msat(htlc_above_dust, None);
2050 let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000;
2051 let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat)
2052 .saturating_sub(inbound_stats.pending_htlcs_value_msat);
2054 if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat + anchor_outputs_value_msat {
2055 // If another HTLC's fee would reduce the remote's balance below the reserve limit
2056 // we've selected for them, we can only send dust HTLCs.
2057 available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1);
2061 let mut next_outbound_htlc_minimum_msat = context.counterparty_htlc_minimum_msat;
2063 // If we get close to our maximum dust exposure, we end up in a situation where we can send
2064	// between zero and the remaining dust exposure limit OR above the dust limit.
2065 // Because we cannot express this as a simple min/max, we prefer to tell the user they can
2066 // send above the dust limit (as the router can always overpay to meet the dust limit).
2067 let mut remaining_msat_below_dust_exposure_limit = None;
2068 let mut dust_exposure_dust_limit_msat = 0;
2069 let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(fee_estimator);
2071 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2072 (context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
2074 let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64;
2075 (context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2076 context.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2078 let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
2079 if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
2080 remaining_msat_below_dust_exposure_limit =
2081 Some(max_dust_htlc_exposure_msat.saturating_sub(on_counterparty_dust_htlc_exposure_msat));
2082 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
2085 let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
2086 if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
2087 remaining_msat_below_dust_exposure_limit = Some(cmp::min(
2088 remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
2089 max_dust_htlc_exposure_msat.saturating_sub(on_holder_dust_htlc_exposure_msat)));
2090 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
2093 if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit {
2094 if available_capacity_msat < dust_exposure_dust_limit_msat {
2095 available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat);
2097 next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat);
2101 available_capacity_msat = cmp::min(available_capacity_msat,
2102 context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat);
2104 if outbound_stats.pending_htlcs + 1 > context.counterparty_max_accepted_htlcs as u32 {
2105 available_capacity_msat = 0;
2109 inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000
2110 - context.value_to_self_msat as i64
2111 - context.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
2112 - context.holder_selected_channel_reserve_satoshis as i64 * 1000,
2114 outbound_capacity_msat,
2115 next_outbound_htlc_limit_msat: available_capacity_msat,
2116 next_outbound_htlc_minimum_msat,
2121 pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
2122 let context = &self;
2123 (context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis)
2126 /// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the
2127 /// number of pending HTLCs that are on track to be in our next commitment tx.
2129 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
2130 /// `fee_spike_buffer_htlc` is `Some`.
2132	/// The first extra HTLC is useful for determining whether we can accept a further HTLC; the
2133	/// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
2135 /// Dust HTLCs are excluded.
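/// For instance, `get_available_balances` above passes the candidate HTLC together with
/// `Some(())` for `fee_spike_buffer_htlc`, pricing in one extra non-dust HTLC's worth of
/// fees as a buffer against feerate spikes.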
2136 fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
2137 let context = &self;
2138 assert!(context.is_outbound());
2140 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2143 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2144 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2146 let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
2147 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
2149 let mut addl_htlcs = 0;
2150 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
2152 HTLCInitiator::LocalOffered => {
2153 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
2157 HTLCInitiator::RemoteOffered => {
2158 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
2164 let mut included_htlcs = 0;
2165 for ref htlc in context.pending_inbound_htlcs.iter() {
2166 if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
2169 // We include LocalRemoved HTLCs here because we may still need to broadcast a commitment
2170 // transaction including this HTLC if it times out before they RAA.
2171 included_htlcs += 1;
2174 for ref htlc in context.pending_outbound_htlcs.iter() {
2175 if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
2179 OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1,
2180 OutboundHTLCState::Committed => included_htlcs += 1,
2181 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
2182 // We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment
2183 // transaction won't be generated until they send us their next RAA, which will mean
2184 // dropping any HTLCs in this state.
2189 for htlc in context.holding_cell_htlc_updates.iter() {
2191 &HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => {
2192 if amount_msat / 1000 < real_dust_limit_timeout_sat {
2197 _ => {}, // Don't include claims/fails that are awaiting ack, because once we get the
2198 // ack we're guaranteed to never include them in commitment txs anymore.
2202 let num_htlcs = included_htlcs + addl_htlcs;
2203 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
2204 #[cfg(any(test, fuzzing))]
2207 if fee_spike_buffer_htlc.is_some() {
2208 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
2210 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len()
2211 + context.holding_cell_htlc_updates.len();
2212 let commitment_tx_info = CommitmentTxInfoCached {
2214 total_pending_htlcs,
2215 next_holder_htlc_id: match htlc.origin {
2216 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
2217 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
2219 next_counterparty_htlc_id: match htlc.origin {
2220 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
2221 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
2223 feerate: context.feerate_per_kw,
2225 *context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
2230 /// Get the commitment tx fee for the remote's next commitment transaction based on the number of
2231 /// pending HTLCs that are on track to be in their next commitment tx
2233 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
2234 /// `fee_spike_buffer_htlc` is `Some`.
2236	/// The first extra HTLC is useful for determining whether we can accept a further HTLC; the
2237	/// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
2239 /// Dust HTLCs are excluded.
2240 fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
2241 let context = &self;
2242 assert!(!context.is_outbound());
2244 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2247 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2248 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2250 let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
2251 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
2253 let mut addl_htlcs = 0;
2254 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
2256 HTLCInitiator::LocalOffered => {
2257 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
2261 HTLCInitiator::RemoteOffered => {
2262 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
2268 // When calculating the set of HTLCs which will be included in their next commitment_signed, all
2269	// non-dust inbound HTLCs are included (as all states imply they will be included) and only
2270 // committed outbound HTLCs, see below.
2271 let mut included_htlcs = 0;
2272 for ref htlc in context.pending_inbound_htlcs.iter() {
2273 if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat {
2276 included_htlcs += 1;
2279 for ref htlc in context.pending_outbound_htlcs.iter() {
2280 if htlc.amount_msat / 1000 <= real_dust_limit_success_sat {
2283	// We only include outbound HTLCs if they will not be included in their next commitment_signed,
2284 // i.e. if they've responded to us with an RAA after announcement.
2286 OutboundHTLCState::Committed => included_htlcs += 1,
2287 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
2288 OutboundHTLCState::LocalAnnounced { .. } => included_htlcs += 1,
2293 let num_htlcs = included_htlcs + addl_htlcs;
2294 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
2295 #[cfg(any(test, fuzzing))]
2298 if fee_spike_buffer_htlc.is_some() {
2299 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
2301 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len();
2302 let commitment_tx_info = CommitmentTxInfoCached {
2304 total_pending_htlcs,
2305 next_holder_htlc_id: match htlc.origin {
2306 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
2307 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
2309 next_counterparty_htlc_id: match htlc.origin {
2310 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
2311 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
2313 feerate: context.feerate_per_kw,
2315 *context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
2320 fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O> where F: Fn() -> Option<O> {
2321 match self.channel_state {
2322 ChannelState::FundingNegotiated => f(),
2323 ChannelState::AwaitingChannelReady(flags) =>
2324 if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) ||
2325 flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into())
2335	/// Returns the transaction if there is a pending funding transaction that is yet to be broadcast.
2337 pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
2338 self.if_unbroadcasted_funding(|| self.funding_transaction.clone())
2341	/// Returns the transaction ID if there is a pending funding transaction that is yet to be broadcast.
2343 pub fn unbroadcasted_funding_txid(&self) -> Option<Txid> {
2344 self.if_unbroadcasted_funding(||
2345 self.channel_transaction_parameters.funding_outpoint.map(|txo| txo.txid)
2349 /// Returns whether the channel is funded in a batch.
2350 pub fn is_batch_funding(&self) -> bool {
2351 self.is_batch_funding.is_some()
2354	/// Returns the transaction ID if there is a pending batch funding transaction that is yet to be broadcast.
2356 pub fn unbroadcasted_batch_funding_txid(&self) -> Option<Txid> {
2357 self.unbroadcasted_funding_txid().filter(|_| self.is_batch_funding())
2360 /// Gets the latest commitment transaction and any dependent transactions for relay (forcing
2361 /// shutdown of this channel - no more calls into this Channel may be made afterwards except
2362 /// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
2363	/// Also returns the list of payment_hashes for HTLCs which we can safely fail backwards
2364	/// immediately (others we will have to allow to time out).
2365 pub fn force_shutdown(&mut self, should_broadcast: bool, closure_reason: ClosureReason) -> ShutdownResult {
2366 // Note that we MUST only generate a monitor update that indicates force-closure - we're
2367 // called during initialization prior to the chain_monitor in the encompassing ChannelManager
2368	// being fully configured in some cases. Thus, it's likely any monitor events we generate will
2369 // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
2370 assert!(!matches!(self.channel_state, ChannelState::ShutdownComplete));
2372 // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
2373 // return them to fail the payment.
2374 let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
2375 let counterparty_node_id = self.get_counterparty_node_id();
2376 for htlc_update in self.holding_cell_htlc_updates.drain(..) {
2378 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
2379 dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
2384 let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
2385 // If we haven't yet exchanged funding signatures (ie channel_state < AwaitingChannelReady),
2386 // returning a channel monitor update here would imply a channel monitor update before
2387 // we even registered the channel monitor to begin with, which is invalid.
2388 // Thus, if we aren't actually at a point where we could conceivably broadcast the
2389 // funding transaction, don't return a funding txo (which prevents providing the
2390 // monitor update to the user, even if we return one).
2391 // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
2392 let generate_monitor_update = match self.channel_state {
2393 ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)|ChannelState::ShutdownComplete => true,
2396 if generate_monitor_update {
2397 self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
2398 Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
2399 update_id: self.latest_monitor_update_id,
2400 counterparty_node_id: Some(self.counterparty_node_id),
2401 updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
2405 let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();
2406 let unbroadcasted_funding_tx = self.unbroadcasted_funding();
2408 self.channel_state = ChannelState::ShutdownComplete;
2409 self.update_time_counter += 1;
2413 dropped_outbound_htlcs,
2414 unbroadcasted_batch_funding_txid,
2415 channel_id: self.channel_id,
2416 user_channel_id: self.user_id,
2417 channel_capacity_satoshis: self.channel_value_satoshis,
2418 counterparty_node_id: self.counterparty_node_id,
2419 unbroadcasted_funding_tx,
2423 /// Only allowed after [`Self::channel_transaction_parameters`] is set.
2424 fn get_funding_signed_msg<L: Deref>(&mut self, logger: &L) -> (CommitmentTransaction, Option<msgs::FundingSigned>) where L::Target: Logger {
2425 let counterparty_keys = self.build_remote_transaction_keys();
2426 let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number + 1, &counterparty_keys, false, false, logger).tx;
2428 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
2429 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
2430 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
2431 &self.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
2433 match &self.holder_signer {
2434 // TODO (arik): move match into calling method for Taproot
2435 ChannelSignerType::Ecdsa(ecdsa) => {
2436 let funding_signed = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.secp_ctx)
2437 .map(|(signature, _)| msgs::FundingSigned {
2438 channel_id: self.channel_id(),
2441 partial_signature_with_nonce: None,
2445 if funding_signed.is_none() {
2446 #[cfg(not(async_signing))] {
2447 panic!("Failed to get signature for funding_signed");
2449 #[cfg(async_signing)] {
2450 log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
2451 self.signer_pending_funding = true;
2453 } else if self.signer_pending_funding {
2454 log_trace!(logger, "Counterparty commitment signature available for funding_signed message; clearing signer_pending_funding");
2455 self.signer_pending_funding = false;
2458 // We sign "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
2459 (counterparty_initial_commitment_tx, funding_signed)
2461 // TODO (taproot|arik)
2468 // Internal utility functions for channels
2470 /// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
2471 /// `channel_value_satoshis` in msat, set through
2472 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
2474 /// The effective percentage is lower bounded by 1% and upper bounded by 100%.
2476 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
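///
/// A minimal usage sketch (illustrative numbers; `ignore`d since this is a private helper):
///
/// ```ignore
/// let config = ChannelHandshakeConfig {
///     max_inbound_htlc_value_in_flight_percent_of_channel: 10,
///     ..Default::default()
/// };
/// // 1_000_000 sat * 10 * 10 = 100_000_000 msat, i.e. 10% of the channel value in msat.
/// assert_eq!(get_holder_max_htlc_value_in_flight_msat(1_000_000, &config), 100_000_000);
/// ```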
2477 fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
2478 let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
2480 } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
2483 config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
2485 channel_value_satoshis * 10 * configured_percent
2488 /// Returns a minimum channel reserve value the remote needs to maintain,
2489 /// required by us according to the configured or default
2490 /// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
2492 /// Guaranteed to return a value no larger than channel_value_satoshis
2494	/// This is used both for outbound and inbound channels and has a lower bound
2495	/// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
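///
/// Illustrative example (numbers assumed): with `their_channel_reserve_proportional_millionths`
/// of 10_000 (1%) on a 1_000_000 sat channel, this returns
/// min(1_000_000, max(10_000, MIN_THEIR_CHAN_RESERVE_SATOSHIS)) = 10_000 sats.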
2496 pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
2497 let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
2498 cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
2501 /// This is for legacy reasons, present for forward-compatibility.
2502	/// LDK versions older than 0.0.104 don't know how to read/handle values other than default
2503	/// from storage. Hence, we use this function to avoid persisting the default value of
2504	/// `holder_selected_channel_reserve_satoshis` for channels into storage.
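///
/// Illustrative example: the legacy default is 1% of the channel value with a 1_000 sat
/// floor, so a 50_000 sat channel yields min(50_000, max(500, 1_000)) = 1_000 sats.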
2505 pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
2506 let (q, _) = channel_value_satoshis.overflowing_div(100);
2507 cmp::min(channel_value_satoshis, cmp::max(q, 1000))
2510 // Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
2511 // Note that num_htlcs should not include dust HTLCs.
2513 fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2514 feerate_per_kw as u64 * (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
2517 // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs.
2518 // Note that num_htlcs should not include dust HTLCs.
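// Illustrative example (using the BOLT 3 non-anchor weights: 724 WU base plus 172 WU per
// non-dust HTLC): at 2_500 sat/kW with two non-dust HTLCs this is
// (724 + 2 * 172) * 2_500 / 1000 * 1000 = 2_670_000 msat (2_670 sats); dividing by 1000
// before multiplying floors the fee to whole satoshis.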
2519 pub(crate) fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2520 // Note that we need to divide before multiplying to round properly,
2521 // since the lowest denomination of bitcoin on-chain is the satoshi.
2522 (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
2525 // Holder designates channel data owned for the benefit of the user client.
2526	// Counterparty designates channel data owned by the other channel participant entity.
2527 pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
2528 pub context: ChannelContext<SP>,
2531 #[cfg(any(test, fuzzing))]
2532 struct CommitmentTxInfoCached {
2534 total_pending_htlcs: usize,
2535 next_holder_htlc_id: u64,
2536 next_counterparty_htlc_id: u64,
/// Contents of a wire message that fails an HTLC backwards. Useful for [`Channel::fail_htlc`] to
/// fail with either [`msgs::UpdateFailMalformedHTLC`] or [`msgs::UpdateFailHTLC`] as needed.
trait FailHTLCContents {
	type Message: FailHTLCMessageName;
	fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message;
	fn to_inbound_htlc_state(self) -> InboundHTLCState;
	fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK;
}
impl FailHTLCContents for msgs::OnionErrorPacket {
	type Message = msgs::UpdateFailHTLC;
	fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
		msgs::UpdateFailHTLC { htlc_id, channel_id, reason: self }
	}
	fn to_inbound_htlc_state(self) -> InboundHTLCState {
		InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(self))
	}
	fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
		HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet: self }
	}
}
impl FailHTLCContents for ([u8; 32], u16) {
	type Message = msgs::UpdateFailMalformedHTLC;
	fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
		msgs::UpdateFailMalformedHTLC {
			htlc_id,
			channel_id,
			sha256_of_onion: self.0,
			failure_code: self.1
		}
	}
	fn to_inbound_htlc_state(self) -> InboundHTLCState {
		InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed(self))
	}
	fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
		HTLCUpdateAwaitingACK::FailMalformedHTLC {
			htlc_id,
			sha256_of_onion: self.0,
			failure_code: self.1
		}
	}
}
trait FailHTLCMessageName {
	fn name() -> &'static str;
}
impl FailHTLCMessageName for msgs::UpdateFailHTLC {
	fn name() -> &'static str {
		"update_fail_htlc"
	}
}
impl FailHTLCMessageName for msgs::UpdateFailMalformedHTLC {
	fn name() -> &'static str {
		"update_fail_malformed_htlc"
	}
}
impl<SP: Deref> Channel<SP> where
	SP::Target: SignerProvider,
	<SP::Target as SignerProvider>::EcdsaSigner: WriteableEcdsaChannelSigner
{
	fn check_remote_fee<F: Deref, L: Deref>(
		channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator<F>,
		feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L
	) -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
	{
		let lower_limit_conf_target = if channel_type.supports_anchors_zero_fee_htlc_tx() {
			ConfirmationTarget::MinAllowedAnchorChannelRemoteFee
		} else {
			ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee
		};
		let lower_limit = fee_estimator.bounded_sat_per_1000_weight(lower_limit_conf_target);
		if feerate_per_kw < lower_limit {
			if let Some(cur_feerate) = cur_feerate_per_kw {
				if feerate_per_kw > cur_feerate {
					log_warn!(logger,
						"Accepting feerate that may prevent us from closing this channel because it's higher than what we have now. Had {} s/kW, now {} s/kW.",
						cur_feerate, feerate_per_kw);
					return Ok(());
				}
			}
			return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit)));
		}
		Ok(())
	}
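	// Illustrative note, not from the original source: with a hypothetical estimator floor of
	// 253 sat/kW for the relevant ConfirmationTarget, a peer proposing 200 sat/kW is rejected
	// outright, while 300 sat/kW passes. The `feerate_per_kw > cur_feerate` escape hatch only
	// applies when the peer is *raising* the fee from one we already accepted.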
	fn get_closing_scriptpubkey(&self) -> ScriptBuf {
		// The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
		// is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
		// outside of those situations will panic on the `unwrap` below.
		self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
	}
	fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
		let mut ret =
		(4 +                                                   // version
		 1 +                                                   // input count
		 36 +                                                  // prevout
		 1 +                                                   // script length (0)
		 4 +                                                   // sequence
		 1 +                                                   // output count
		 4                                                     // lock time
		)*4 +                                                  // * 4 for non-witness parts
		2 +                                                    // witness marker and flag
		1 +                                                    // witness element count
		4 +                                                    // 4 element lengths (2 sigs, multisig dummy, and witness script)
		self.context.get_funding_redeemscript().len() as u64 + // funding witness script
		2*(1 + 71);                                            // two signatures + sighash type flags
		if let Some(spk) = a_scriptpubkey {
			ret += ((8+1) +            // output values and script length
				spk.len() as u64) * 4; // scriptpubkey and witness multiplier
		}
		if let Some(spk) = b_scriptpubkey {
			ret += ((8+1) +            // output values and script length
				spk.len() as u64) * 4; // scriptpubkey and witness multiplier
		}
		ret
	}
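	// Worked example (illustrative only): with the usual 71-byte 2-of-2 funding redeemscript
	// and two P2WPKH outputs (22-byte scriptpubkeys), the weight is
	// (4+1+36+1+4+1+4)*4 = 204 for the non-witness parts, 2+1+4+71+2*72 = 222 for the
	// witness, and (8+1+22)*4 = 124 per output, for 204 + 222 + 248 = 674 WU total.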
	fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
		assert!(self.context.pending_inbound_htlcs.is_empty());
		assert!(self.context.pending_outbound_htlcs.is_empty());
		assert!(self.context.pending_update_fee.is_none());

		let mut total_fee_satoshis = proposed_total_fee_satoshis;
		let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 };
		let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 };

		if value_to_holder < 0 {
			assert!(self.context.is_outbound());
			total_fee_satoshis += (-value_to_holder) as u64;
		} else if value_to_counterparty < 0 {
			assert!(!self.context.is_outbound());
			total_fee_satoshis += (-value_to_counterparty) as u64;
		}

		if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis {
			value_to_counterparty = 0;
		}

		if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis {
			value_to_holder = 0;
		}

		assert!(self.context.shutdown_scriptpubkey.is_some());
		let holder_shutdown_script = self.get_closing_scriptpubkey();
		let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap();
		let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();

		let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
		(closing_transaction, total_fee_satoshis)
	}
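	// Worked example (illustrative only): in a 1_000_000 sat channel where we hold
	// 600_000_000 msat and are the outbound/funder side, a proposed_total_fee_satoshis of 500
	// gives value_to_holder = 600_000 - 500 = 599_500 sats and value_to_counterparty =
	// 400_000 sats; either output is then dropped (set to 0) if it is at or below our dust
	// limit, with skip_remote_output dropping the counterparty output regardless.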
	fn funding_outpoint(&self) -> OutPoint {
		self.context.channel_transaction_parameters.funding_outpoint.unwrap()
	}
	/// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
	/// entirely.
	///
	/// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
	/// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
	///
	/// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is
	/// disconnected).
	pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
		(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
	where L::Target: Logger {
		// Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
		// (see equivalent if condition there).
		assert!(self.context.channel_state.should_force_holding_cell());
		let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
		let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
		self.context.latest_monitor_update_id = mon_update_id;
		if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
			assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
		}
	}
	fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
		// Either ChannelReady got set (which means it won't be unset) or there is no way any
		// caller thought we could have something claimed (because we wouldn't have accepted an
		// incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
		// so it's safe to just panic!
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
			panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
		}

		// ChannelManager may generate duplicate claims/fails due to HTLC update events from
		// on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
		// these, but for now we just have to treat them as normal.

		let mut pending_idx = core::usize::MAX;
		let mut htlc_value_msat = 0;
		for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
			if htlc.htlc_id == htlc_id_arg {
				debug_assert_eq!(htlc.payment_hash, PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).to_byte_array()));
				log_debug!(logger, "Claiming inbound HTLC id {} with payment hash {} with preimage {}",
					htlc.htlc_id, htlc.payment_hash, payment_preimage_arg);
				match htlc.state {
					InboundHTLCState::Committed => {},
					InboundHTLCState::LocalRemoved(ref reason) => {
						if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
						} else {
							log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, &self.context.channel_id());
							debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
						}
						return UpdateFulfillFetch::DuplicateClaim {};
					},
					_ => {
						debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
						// Don't return in release mode here so that we can update channel_monitor
					},
				}
				pending_idx = idx;
				htlc_value_msat = htlc.amount_msat;
				break;
			}
		}
		if pending_idx == core::usize::MAX {
			#[cfg(any(test, fuzzing))]
			// If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
			// this is simply a duplicate claim, not previously failed and we lost funds.
			debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
			return UpdateFulfillFetch::DuplicateClaim {};
		}

		// Now update local state:
		//
		// We have to put the payment_preimage in the channel_monitor right away here to ensure we
		// can claim it even if the channel hits the chain before we see their next commitment.
		self.context.latest_monitor_update_id += 1;
		let monitor_update = ChannelMonitorUpdate {
			update_id: self.context.latest_monitor_update_id,
			counterparty_node_id: Some(self.context.counterparty_node_id),
			updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
				payment_preimage: payment_preimage_arg.clone(),
			}],
		};

		if self.context.channel_state.should_force_holding_cell() {
			// Note that this condition is the same as the assertion in
			// `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
			// `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
			// do not get into this branch.
			for pending_update in self.context.holding_cell_htlc_updates.iter() {
				match pending_update {
					&HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
						if htlc_id_arg == htlc_id {
							// Make sure we don't leave latest_monitor_update_id incremented here:
							self.context.latest_monitor_update_id -= 1;
							#[cfg(any(test, fuzzing))]
							debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
							return UpdateFulfillFetch::DuplicateClaim {};
						}
					},
					&HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
						&HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
					{
						if htlc_id_arg == htlc_id {
							log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id());
							// TODO: We may actually be able to switch to a fulfill here, though its
							// rare enough it may not be worth the complexity burden.
							debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
							return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
						}
					},
					_ => {}
				}
			}
			log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state.to_u32());
			self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
				payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
			});
			#[cfg(any(test, fuzzing))]
			self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
			return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
		}
		#[cfg(any(test, fuzzing))]
		self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);

		{
			let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
			if let InboundHTLCState::Committed = htlc.state {
			} else {
				debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
				return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
			}
			log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, &self.context.channel_id);
			htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
		}

		UpdateFulfillFetch::NewClaim {
			monitor_update,
			htlc_value_msat,
			msg: Some(msgs::UpdateFulfillHTLC {
				channel_id: self.context.channel_id(),
				htlc_id: htlc_id_arg,
				payment_preimage: payment_preimage_arg,
			}),
		}
	}
	pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
		let release_cs_monitor = self.context.blocked_monitor_updates.is_empty();
		match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
			UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
				// Even if we aren't supposed to let new monitor updates with commitment state
				// updates run, we still need to push the preimage ChannelMonitorUpdateStep no
				// matter what. Sadly, to push a new monitor update which flies before others
				// already queued, we have to insert it into the pending queue and update the
				// update_ids of all the following monitors.
				if release_cs_monitor && msg.is_some() {
					let mut additional_update = self.build_commitment_no_status_check(logger);
					// build_commitment_no_status_check may bump latest_monitor_id but we want them
					// to be strictly increasing by one, so decrement it here.
					self.context.latest_monitor_update_id = monitor_update.update_id;
					monitor_update.updates.append(&mut additional_update.updates);
				} else {
					let new_mon_id = self.context.blocked_monitor_updates.get(0)
						.map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
					monitor_update.update_id = new_mon_id;
					for held_update in self.context.blocked_monitor_updates.iter_mut() {
						held_update.update.update_id += 1;
					}
					if msg.is_some() {
						debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
						let update = self.build_commitment_no_status_check(logger);
						self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
							update,
						});
					}
				}

				self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
				UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, }
			},
			UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
		}
	}
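	// Illustrative walkthrough, not from the original source: suppose latest_monitor_update_id
	// is 5 when the claim arrives. get_update_fulfill_htlc bumps it to 6 for the preimage
	// update; build_commitment_no_status_check would bump it again to 7, so the id is reset
	// to 6 and the commitment step is merged into the same ChannelMonitorUpdate, keeping
	// update ids strictly increasing by exactly one.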
	/// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
	/// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
	/// however, fail more than once as we wait for an upstream failure to be irrevocably committed
	/// before we fail backwards.
	///
	/// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
	/// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
	/// [`ChannelError::Ignore`].
	pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
	-> Result<(), ChannelError> where L::Target: Logger {
		self.fail_htlc(htlc_id_arg, err_packet, true, logger)
			.map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
	}
	/// Used for failing back with [`msgs::UpdateFailMalformedHTLC`]. For now, this is used when we
	/// want to fail blinded HTLCs where we are not the intro node.
	///
	/// See [`Self::queue_fail_htlc`] for more info.
	pub fn queue_fail_malformed_htlc<L: Deref>(
		&mut self, htlc_id_arg: u64, failure_code: u16, sha256_of_onion: [u8; 32], logger: &L
	) -> Result<(), ChannelError> where L::Target: Logger {
		self.fail_htlc(htlc_id_arg, (sha256_of_onion, failure_code), true, logger)
			.map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
	}
	/// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
	/// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
	/// however, fail more than once as we wait for an upstream failure to be irrevocably committed
	/// before we fail backwards.
	///
	/// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
	/// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
	/// [`ChannelError::Ignore`].
	fn fail_htlc<L: Deref, E: FailHTLCContents + Clone>(
		&mut self, htlc_id_arg: u64, err_contents: E, mut force_holding_cell: bool,
		logger: &L
	) -> Result<Option<E::Message>, ChannelError> where L::Target: Logger {
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
			panic!("Was asked to fail an HTLC when channel was not in an operational state");
		}

		// ChannelManager may generate duplicate claims/fails due to HTLC update events from
		// on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
		// these, but for now we just have to treat them as normal.

		let mut pending_idx = core::usize::MAX;
		for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
			if htlc.htlc_id == htlc_id_arg {
				match htlc.state {
					InboundHTLCState::Committed => {},
					InboundHTLCState::LocalRemoved(ref reason) => {
						if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
						} else {
							debug_assert!(false, "Tried to fail an HTLC that was already failed");
						}
						return Ok(None);
					},
					_ => {
						debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
						return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
					}
				}
				pending_idx = idx;
			}
		}
		if pending_idx == core::usize::MAX {
			#[cfg(any(test, fuzzing))]
			// If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
			// is simply a duplicate fail, not previously failed and we failed-back too early.
			debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
			return Ok(None);
		}

		if self.context.channel_state.should_force_holding_cell() {
			debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
			force_holding_cell = true;
		}

		// Now update local state:
		if force_holding_cell {
			for pending_update in self.context.holding_cell_htlc_updates.iter() {
				match pending_update {
					&HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
						if htlc_id_arg == htlc_id {
							#[cfg(any(test, fuzzing))]
							debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
							return Ok(None);
						}
					},
					&HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
						&HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
					{
						if htlc_id_arg == htlc_id {
							debug_assert!(false, "Tried to fail an HTLC that was already failed");
							return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
						}
					},
					_ => {}
				}
			}
			log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id());
			self.context.holding_cell_htlc_updates.push(err_contents.to_htlc_update_awaiting_ack(htlc_id_arg));
			return Ok(None);
		}

		log_trace!(logger, "Failing HTLC ID {} back with {} message in channel {}.", htlc_id_arg,
			E::Message::name(), &self.context.channel_id());
		{
			let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
			htlc.state = err_contents.clone().to_inbound_htlc_state();
		}

		Ok(Some(err_contents.to_message(htlc_id_arg, self.context.channel_id())))
	}
	// Message handlers:

	/// Updates the state of the channel to indicate that all channels in the batch have received
	/// funding_signed and persisted their monitors.
	/// The funding transaction is consequently allowed to be broadcast, and the channel can be
	/// treated as a non-batch channel going forward.
	pub fn set_batch_ready(&mut self) {
		self.context.is_batch_funding = None;
		self.context.channel_state.clear_waiting_for_batch();
	}
	/// Unsets the existing funding information.
	///
	/// This must only be used if the channel has not yet completed funding and has not been used.
	///
	/// Further, the channel must be immediately shut down after this with a call to
	/// [`ChannelContext::force_shutdown`].
	pub fn unset_funding_info(&mut self, temporary_channel_id: ChannelId) {
		debug_assert!(matches!(
			self.context.channel_state, ChannelState::AwaitingChannelReady(_)
		));
		self.context.channel_transaction_parameters.funding_outpoint = None;
		self.context.channel_id = temporary_channel_id;
	}
	/// Handles a channel_ready message from our peer. If we've already sent our channel_ready
	/// and the channel is now usable (and public), this may generate an announcement_signatures to
	/// reply with.
	pub fn channel_ready<NS: Deref, L: Deref>(
		&mut self, msg: &msgs::ChannelReady, node_signer: &NS, chain_hash: ChainHash,
		user_config: &UserConfig, best_block: &BestBlock, logger: &L
	) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
	where
		NS::Target: NodeSigner,
		L::Target: Logger
	{
		if self.context.channel_state.is_peer_disconnected() {
			self.context.workaround_lnd_bug_4006 = Some(msg.clone());
			return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
		}

		if let Some(scid_alias) = msg.short_channel_id_alias {
			if Some(scid_alias) != self.context.short_channel_id {
				// The scid alias provided can be used to route payments *from* our counterparty,
				// i.e. can be used for inbound payments and provided in invoices, but is not used
				// when routing outbound payments.
				self.context.latest_inbound_scid_alias = Some(scid_alias);
			}
		}

		// Our channel_ready shouldn't have been sent if we are waiting for other channels in the
		// batch, but we can receive channel_ready messages.
		let mut check_reconnection = false;
		match &self.context.channel_state {
			ChannelState::AwaitingChannelReady(flags) => {
				let flags = *flags & !FundedStateFlags::ALL;
				debug_assert!(!flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY) || !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
				if flags & !AwaitingChannelReadyFlags::WAITING_FOR_BATCH == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY {
					// If we reconnected before sending our `channel_ready` they may still resend theirs.
					check_reconnection = true;
				} else if (flags & !AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty() {
					self.context.channel_state.set_their_channel_ready();
				} else if flags == AwaitingChannelReadyFlags::OUR_CHANNEL_READY {
					self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
					self.context.update_time_counter += 1;
				} else {
					// We're in `WAITING_FOR_BATCH`, so we should wait until we're ready.
					debug_assert!(flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
				}
			},
			// If we reconnected before sending our `channel_ready` they may still resend theirs.
			ChannelState::ChannelReady(_) => check_reconnection = true,
			_ => return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned())),
		}
		if check_reconnection {
			// They probably disconnected/reconnected and re-sent the channel_ready, which is
			// required, or they're sending a fresh SCID alias.
			let expected_point =
				if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
					// If they haven't ever sent an updated point, the point they send should match
					// the current one.
					self.context.counterparty_cur_commitment_point
				} else if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
					// If we've advanced the commitment number once, the second commitment point is
					// at `counterparty_prev_commitment_point`, which is not yet revoked.
					debug_assert!(self.context.counterparty_prev_commitment_point.is_some());
					self.context.counterparty_prev_commitment_point
				} else {
					// If they have sent updated points, channel_ready is always supposed to match
					// their "first" point, which we re-derive here.
					Some(PublicKey::from_secret_key(&self.context.secp_ctx, &SecretKey::from_slice(
						&self.context.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
					).expect("We already advanced, so previous secret keys should have been validated already")))
				};
			if expected_point != Some(msg.next_per_commitment_point) {
				return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
			}
			return Ok(None);
		}

		self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
		self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);

		log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id());

		Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger))
	}
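	// Illustrative note, not from the original source: commitment numbers count *down* from
	// INITIAL_COMMITMENT_NUMBER (assumed 2^48 - 1 here). At INITIAL_COMMITMENT_NUMBER - 1 the
	// counterparty has never updated their point, at INITIAL_COMMITMENT_NUMBER - 2 they have
	// advanced exactly once, and beyond that their first secret has been revealed, letting us
	// re-derive the original point directly.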
	pub fn update_add_htlc<F, FE: Deref, L: Deref>(
		&mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
		create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
	) -> Result<(), ChannelError>
	where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
		FE::Target: FeeEstimator, L::Target: Logger,
	{
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
			return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
		}
		// We can't accept HTLCs sent after we've sent a shutdown.
		if self.context.channel_state.is_local_shutdown_sent() {
			pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
		}
		// If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
		if self.context.channel_state.is_remote_shutdown_sent() {
			return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
		}
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
		}
		if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
			return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
		}
		if msg.amount_msat == 0 {
			return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
		}
		if msg.amount_msat < self.context.holder_htlc_minimum_msat {
			return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
		}
		let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
		let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
		if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
			return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
		}
		if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
			return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
		}

		// Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
		// the reserve_satoshis we told them to always have as direct payment so that they lose
		// something if we punish them for broadcasting an old state).
		// Note that we don't really care about having a small/no to_remote output in our local
		// commitment transactions, as the purpose of the channel reserve is to ensure we can
		// punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
		// present in the next commitment transaction we send them (at least for fulfilled ones,
		// failed ones won't modify value_to_self).
		// Note that we will send HTLCs which another instance of rust-lightning would think
		// violate the reserve value if we do not do this (as we forget inbound HTLCs from the
		// Channel state once they will not be present in the next received commitment
		// transaction).
		let mut removed_outbound_total_msat = 0;
		for ref htlc in self.context.pending_outbound_htlcs.iter() {
			if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
				removed_outbound_total_msat += htlc.amount_msat;
			} else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
				removed_outbound_total_msat += htlc.amount_msat;
			}
		}
		let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
		let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
			(0, 0)
		} else {
			let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
			(dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
				dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
		};
		let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
		if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
			let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
			if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
				log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
					on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
				pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
			}
		}

		let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
		if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
			let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
			if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
				log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
					on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
				pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
			}
		}
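		// Worked example (illustrative only; assumes non-anchor HTLC-timeout/success weights
		// of 663/703 WU): with a dust buffer feerate of 2_500 sat/kW the timeout dust limit
		// is 2_500 * 663 / 1000 = 1_657 sats, so with a counterparty dust limit of 546 sats
		// any HTLC under 2_203 sats counts toward our dust exposure on their commitment tx.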
		let pending_value_to_self_msat =
			self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
		let pending_remote_value_msat =
			self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
		if pending_remote_value_msat < msg.amount_msat {
			return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
		}

		// Check that the remote can afford to pay for this HTLC on-chain at the current
		// feerate_per_kw, while maintaining their channel reserve (as required by the spec).
		{
			let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else {
				let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
				self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
			};
			let anchor_outputs_value_msat = if !self.context.is_outbound() && self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
				ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
			} else {
				0
			};
			if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(anchor_outputs_value_msat) < remote_commit_tx_fee_msat {
				return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
			}
			if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(remote_commit_tx_fee_msat).saturating_sub(anchor_outputs_value_msat) < self.context.holder_selected_channel_reserve_satoshis * 1000 {
				return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
			}
		}
		let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
			ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
		} else {
			0
		};
		if !self.context.is_outbound() {
			// `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
			// the spec because the fee spike buffer requirement doesn't exist on the receiver's
			// side, only on the sender's. Note that with anchor outputs we are no longer as
			// sensitive to fee spikes, so we need not account for them.
			let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
			let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
			if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
				remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
			}
			if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
				// Note that if the pending_forward_status is not updated here, then it's because we're already failing
				// the HTLC, i.e. its status is already set to failing.
				log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
				pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
			}
		} else {
			// Check that they won't violate our local required channel reserve by adding this HTLC.
			let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
			let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
			if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat + anchor_outputs_value_msat {
				return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
			}
		}
		if self.context.next_counterparty_htlc_id != msg.htlc_id {
			return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
		}
		if msg.cltv_expiry >= 500000000 {
			return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
		}

		if self.context.channel_state.is_local_shutdown_sent() {
			if let PendingHTLCStatus::Forward(_) = pending_forward_status {
				panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
			}
		}

		// Now update local state:
		self.context.next_counterparty_htlc_id += 1;
		self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
			htlc_id: msg.htlc_id,
			amount_msat: msg.amount_msat,
			payment_hash: msg.payment_hash,
			cltv_expiry: msg.cltv_expiry,
			state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
		});
		Ok(())
	}
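	// Worked example of the fee spike buffer (illustrative only; assumes a non-anchor base
	// weight of 724 WU, 172 WU per HTLC, and FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE = 2): at
	// 2_500 sat/kW, a commitment with this HTLC plus one buffer HTLC weighs 724 + 2*172 =
	// 1_068 WU, costing 2_670 sats; doubled for the spike buffer, the remote must keep
	// 5_340_000 msat above their reserve or we fail the HTLC back with 0x1000|7
	// (temporary_channel_failure).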
	/// Marks an outbound HTLC which we have received update_fail/fulfill/malformed
	#[inline]
	fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
		assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage");
		for htlc in self.context.pending_outbound_htlcs.iter_mut() {
			if htlc.htlc_id == htlc_id {
				let outcome = match check_preimage {
					None => fail_reason.into(),
					Some(payment_preimage) => {
						let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array());
						if payment_hash != htlc.payment_hash {
							return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
						}
						OutboundHTLCOutcome::Success(Some(payment_preimage))
					}
				};
				match htlc.state {
					OutboundHTLCState::LocalAnnounced(_) =>
						return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
					OutboundHTLCState::Committed => {
						htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
					},
					OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
						return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
				}
				return Ok(htlc);
			}
		}
		Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
	}
	pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
			return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
		}
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
		}

		self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
	}

	pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
			return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
		}
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
		}

		self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
		Ok(())
	}

	pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
			return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
		}
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
		}

		self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
		Ok(())
	}
	pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
		where L::Target: Logger
	{
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
			return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
		}
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
		}
		if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
			return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
		}

		let funding_script = self.context.get_funding_redeemscript();

		let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);

		let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
		let commitment_txid = {
			let trusted_tx = commitment_stats.tx.trust();
			let bitcoin_tx = trusted_tx.built_transaction();
			let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);

			log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
				log_bytes!(msg.signature.serialize_compact()[..]),
				log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
				log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), &self.context.channel_id());
			if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
				return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
			}
			bitcoin_tx.txid
		};
		let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();

		// If our counterparty updated the channel fee in this commitment transaction, check that
		// they can actually afford the new fee now.
		let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee {
			update_state == FeeUpdateState::RemoteAnnounced
		} else { false };
		if update_fee {
			debug_assert!(!self.context.is_outbound());
			let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
			if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
				return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
			}
		}
		#[cfg(any(test, fuzzing))]
		{
			if self.context.is_outbound() {
				let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
				*self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
				if let Some(info) = projected_commit_tx_info {
					let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len()
						+ self.context.holding_cell_htlc_updates.len();
					if info.total_pending_htlcs == total_pending_htlcs
						&& info.next_holder_htlc_id == self.context.next_holder_htlc_id
						&& info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
						&& info.feerate == self.context.feerate_per_kw {
							assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);
						}
				}
			}
		}
		if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
			return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
		}

		// Up to LDK 0.0.115, HTLC information was required to be duplicated in the
		// `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
		// in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
		// outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for
		// backwards compatibility, we never use it in production. To provide test coverage, here,
		// we randomly decide (in test/fuzzing builds) to use the new vec sometimes.
		#[allow(unused_assignments, unused_mut)]
		let mut separate_nondust_htlc_sources = false;
		#[cfg(all(feature = "std", any(test, fuzzing)))] {
			use core::hash::{BuildHasher, Hasher};
			// Get a random value using the only std API to do so - the DefaultHasher
			let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
			separate_nondust_htlc_sources = rand_val % 2 == 0;
		}

		let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
		let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
		for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
			if let Some(_) = htlc.transaction_output_index {
				let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
					self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, &self.context.channel_type,
					&keys.broadcaster_delayed_payment_key, &keys.revocation_key);

				let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &keys);
				let htlc_sighashtype = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
				let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
				log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
					log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.to_public_key().serialize()),
					encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), &self.context.channel_id());
				if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key.to_public_key()) {
					return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
				}
				if !separate_nondust_htlc_sources {
					htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
				}
			} else {
				htlcs_and_sigs.push((htlc, None, source_opt.take()));
			}
			if separate_nondust_htlc_sources {
				if let Some(source) = source_opt.take() {
					nondust_htlc_sources.push(source);
				}
			}
			debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
		}

		let holder_commitment_tx = HolderCommitmentTransaction::new(
			commitment_stats.tx,
			msg.signature,
			msg.htlc_signatures.clone(),
			&self.context.get_holder_pubkeys().funding_pubkey,
			self.context.counterparty_funding_pubkey()
		);

		self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.outbound_htlc_preimages)
			.map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
		// Update state now that we've passed all the can-fail calls...
		let mut need_commitment = false;
		if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee {
			if *update_state == FeeUpdateState::RemoteAnnounced {
				*update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
				need_commitment = true;
			}
		}

		for htlc in self.context.pending_inbound_htlcs.iter_mut() {
			let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
				Some(forward_info.clone())
			} else { None };
			if let Some(forward_info) = new_forward {
				log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
					&htlc.payment_hash, &self.context.channel_id);
				htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
				need_commitment = true;
			}
		}
		let mut claimed_htlcs = Vec::new();
		for htlc in self.context.pending_outbound_htlcs.iter_mut() {
			if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
				log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
					&htlc.payment_hash, &self.context.channel_id);
				// Grab the preimage, if it exists, instead of cloning
				let mut reason = OutboundHTLCOutcome::Success(None);
				mem::swap(outcome, &mut reason);
				if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
					// If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
					// upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
					// have a `Success(None)` reason. In this case we could forget some HTLC
					// claims, but such an upgrade is unlikely and including claimed HTLCs here
					// fixes a bug which the user was exposed to on 0.0.104 when they started the
					// upgrade.
					claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
				}
				htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
				need_commitment = true;
			}
		}

		self.context.latest_monitor_update_id += 1;
		let mut monitor_update = ChannelMonitorUpdate {
			update_id: self.context.latest_monitor_update_id,
			counterparty_node_id: Some(self.context.counterparty_node_id),
			updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
				commitment_tx: holder_commitment_tx,
				htlc_outputs: htlcs_and_sigs,
				claimed_htlcs,
				nondust_htlc_sources,
			}],
		};
		self.context.cur_holder_commitment_transaction_number -= 1;
		self.context.expecting_peer_commitment_signed = false;
		// Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
		// build_commitment_no_status_check() next which will reset this to RAAFirst.
		self.context.resend_order = RAACommitmentOrder::CommitmentFirst;

		if self.context.channel_state.is_monitor_update_in_progress() {
			// In case we initially failed monitor updating without requiring a response, we need
			// to make sure the RAA gets sent first.
			self.context.monitor_pending_revoke_and_ack = true;
			if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
				// If we were going to send a commitment_signed after the RAA, go ahead and do all
				// the corresponding HTLC status updates so that
				// get_last_commitment_update_for_send includes the right HTLCs.
				self.context.monitor_pending_commitment_signed = true;
				let mut additional_update = self.build_commitment_no_status_check(logger);
				// build_commitment_no_status_check may bump latest_monitor_id but we want them to be
				// strictly increasing by one, so decrement it here.
				self.context.latest_monitor_update_id = monitor_update.update_id;
				monitor_update.updates.append(&mut additional_update.updates);
			}
			log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
				&self.context.channel_id);
			return Ok(self.push_ret_blockable_mon_update(monitor_update));
		}

		let need_commitment_signed = if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
			// If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
			// we'll send one right away when we get the revoke_and_ack when we
			// free_holding_cell_htlcs().
			let mut additional_update = self.build_commitment_no_status_check(logger);
			// build_commitment_no_status_check may bump latest_monitor_id but we want them to be
			// strictly increasing by one, so decrement it here.
			self.context.latest_monitor_update_id = monitor_update.update_id;
			monitor_update.updates.append(&mut additional_update.updates);
			true
		} else { false };

		log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
			&self.context.channel_id(), if need_commitment_signed { " our own commitment_signed and" } else { "" });
		self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
		return Ok(self.push_ret_blockable_mon_update(monitor_update));
	}
	/// Public version of the below, checking relevant preconditions first.
	/// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
	/// returns `(None, Vec::new())`.
	pub fn maybe_free_holding_cell_htlcs<F: Deref, L: Deref>(
		&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
	) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
	where F::Target: FeeEstimator, L::Target: Logger
	{
		if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && !self.context.channel_state.should_force_holding_cell() {
			self.free_holding_cell_htlcs(fee_estimator, logger)
		} else { (None, Vec::new()) }
	}
3578 /// Frees any pending commitment updates in the holding cell, generating the relevant messages
3579 /// for our counterparty.
3580 fn free_holding_cell_htlcs<F: Deref, L: Deref>(
3581 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3582 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3583 where F::Target: FeeEstimator, L::Target: Logger
3585 assert!(!self.context.channel_state.is_monitor_update_in_progress());
3586 if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
3587 log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
3588 if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id());
3590 let mut monitor_update = ChannelMonitorUpdate {
3591 update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
3592 counterparty_node_id: Some(self.context.counterparty_node_id),
3593 updates: Vec::new(),
3596 let mut htlc_updates = Vec::new();
3597 mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
3598 let mut update_add_count = 0;
3599 let mut update_fulfill_count = 0;
3600 let mut update_fail_count = 0;
3601 let mut htlcs_to_fail = Vec::new();
3602 for htlc_update in htlc_updates.drain(..) {
3603 // Note that this *can* fail, though it should be due to rather-rare conditions on
3604 // fee races with adding too many outputs which push our total payments just over
3605 // the limit. In case it's less rare than I anticipate, we may want to revisit
3606 // handling this case better and maybe fulfilling some of the HTLCs while attempting
3607 // to rebalance channels.
3608 let fail_htlc_res = match &htlc_update {
3609 &HTLCUpdateAwaitingACK::AddHTLC {
3610 amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
3611 skimmed_fee_msat, blinding_point, ..
3613 match self.send_htlc(
3614 amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(),
3615 false, skimmed_fee_msat, blinding_point, fee_estimator, logger
3617 Ok(_) => update_add_count += 1,
3620 ChannelError::Ignore(ref msg) => {
3621 log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", &payment_hash, msg, &self.context.channel_id());
3622 // If we fail to send here, then this HTLC should
3623 // be failed backwards. Failing to send here
3624 // indicates that this HTLC may keep being put back
3625 // into the holding cell without ever being
3626 // successfully forwarded/failed/fulfilled, causing
3627 // our counterparty to eventually close on us.
3628 htlcs_to_fail.push((source.clone(), *payment_hash));
3631 panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
3638 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
3639 // If an HTLC claim was previously added to the holding cell (via
3640 // `get_update_fulfill_htlc`, then generating the claim message itself must
3641 // not fail - any in between attempts to claim the HTLC will have resulted
3642 // in it hitting the holding cell again and we cannot change the state of a
3643 // holding cell HTLC from fulfill to anything else.
3644 let mut additional_monitor_update =
3645 if let UpdateFulfillFetch::NewClaim { monitor_update, .. } =
3646 self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger)
3647 { monitor_update } else { unreachable!() };
3648 update_fulfill_count += 1;
3649 monitor_update.updates.append(&mut additional_monitor_update.updates);
3652 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
3653 Some(self.fail_htlc(htlc_id, err_packet.clone(), false, logger)
3654 .map(|fail_msg_opt| fail_msg_opt.map(|_| ())))
3656 &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
3657 Some(self.fail_htlc(htlc_id, (sha256_of_onion, failure_code), false, logger)
3658 .map(|fail_msg_opt| fail_msg_opt.map(|_| ())))
3661 if let Some(res) = fail_htlc_res {
3663 Ok(fail_msg_opt) => {
3664 // If an HTLC failure was previously added to the holding cell (via
3665 // `queue_fail_{malformed_}htlc`) then generating the fail message itself must
3666 // not fail - we should never end up in a state where we double-fail
3667 // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
3668 // for a full revocation before failing.
3669 debug_assert!(fail_msg_opt.is_some());
3670 update_fail_count += 1;
3672 Err(ChannelError::Ignore(_)) => {},
3674 panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
3679 if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && self.context.holding_cell_update_fee.is_none() {
3680 return (None, htlcs_to_fail);
3682 let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
3683 self.send_update_fee(feerate, false, fee_estimator, logger)
3688 let mut additional_update = self.build_commitment_no_status_check(logger);
3689 // build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_update_id
3690 // but we want them to be strictly increasing by one, so reset it here.
3691 self.context.latest_monitor_update_id = monitor_update.update_id;
3692 monitor_update.updates.append(&mut additional_update.updates);
3694 log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
3695 &self.context.channel_id(), if update_fee.is_some() { "a fee update, " } else { "" },
3696 update_add_count, update_fulfill_count, update_fail_count);
3698 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
3699 (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
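// A hedged sketch (illustrative only, not code from this crate) of the
// update-id merging invariant used above: helpers may bump the monitor update
// id counter internally, but the merged `ChannelMonitorUpdate` must carry a
// single, strictly-increasing id, so the parent's id is written back after
// folding in the child's steps:
//
//     let mut parent = ChannelMonitorUpdate {
//         update_id: latest_monitor_update_id + 1,
//         counterparty_node_id: Some(counterparty_node_id),
//         updates: Vec::new(),
//     };
//     let mut child = helper_returning_update(); // hypothetical helper
//     parent.updates.append(&mut child.updates);
//     latest_monitor_update_id = parent.update_id; // roll back any extra bumps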
3705 /// Handles receiving a remote's revoke_and_ack. Note that we may return a new
3706 /// commitment_signed message here in case we had pending outbound HTLCs to add which were
3707 /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
3708 /// generating an appropriate error *after* the channel state has been updated based on the
3709 /// revoke_and_ack message.
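///
/// A hedged caller-side sketch (the surrounding handler plumbing is assumed,
/// not taken from this file):
/// ```ignore
/// let (htlcs_to_fail, monitor_update_opt) =
/// 	chan.revoke_and_ack(&msg, &fee_estimator, &logger, false)?;
/// for (source, payment_hash) in htlcs_to_fail {
/// 	// fail these back via their HTLC source; they could not be forwarded
/// }
/// if let Some(monitor_update) = monitor_update_opt {
/// 	// persist the ChannelMonitorUpdate before releasing generated messages
/// }
/// ```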
3710 pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK,
3711 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L, hold_mon_update: bool,
3712 ) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
3713 where F::Target: FeeEstimator, L::Target: Logger,
3715 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3716 return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
3718 if self.context.channel_state.is_peer_disconnected() {
3719 return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
3721 if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
3722 return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
3725 let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());
3727 if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
3728 if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
3729 return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
3733 if !self.context.channel_state.is_awaiting_remote_revoke() {
3734 // Our counterparty seems to have burned their coins to us (by revoking a state when we
3735 // haven't given them a new commitment transaction to broadcast). We should probably
3736 // take advantage of this by updating our channel monitor, sending them an error, and
3738 // waiting for them to broadcast their latest (now-revoked) claim. But, that would be a
3738 // lot of work, and there's some chance this is all a misunderstanding anyway.
3739 // We have to do *something*, though, since our signer may get mad at us for otherwise
3740 // jumping a remote commitment number, so best to just force-close and move on.
3741 return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
3744 #[cfg(any(test, fuzzing))]
3746 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
3747 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
3750 match &self.context.holder_signer {
3751 ChannelSignerType::Ecdsa(ecdsa) => {
3752 ecdsa.validate_counterparty_revocation(
3753 self.context.cur_counterparty_commitment_transaction_number + 1,
&secret
3755 ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
3757 // TODO (taproot|arik)
3762 self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
3763 .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
3764 self.context.latest_monitor_update_id += 1;
3765 let mut monitor_update = ChannelMonitorUpdate {
3766 update_id: self.context.latest_monitor_update_id,
3767 counterparty_node_id: Some(self.context.counterparty_node_id),
3768 updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
3769 idx: self.context.cur_counterparty_commitment_transaction_number + 1,
3770 secret: msg.per_commitment_secret,
3774 // Update state now that we've passed all the can-fail calls...
3775 // (note that we may still fail to generate the new commitment_signed message, but that's
3776 // OK, we step the channel here and *then* if the new generation fails we can fail the
3777 // channel based on that, but stepping stuff here should be safe either way.)
3778 self.context.channel_state.clear_awaiting_remote_revoke();
3779 self.context.sent_message_awaiting_response = None;
3780 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
3781 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
3782 self.context.cur_counterparty_commitment_transaction_number -= 1;
3784 if self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
3785 self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
3788 log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id());
3789 let mut to_forward_infos = Vec::new();
3790 let mut revoked_htlcs = Vec::new();
3791 let mut finalized_claimed_htlcs = Vec::new();
3792 let mut update_fail_htlcs = Vec::new();
3793 let mut update_fail_malformed_htlcs = Vec::new();
3794 let mut require_commitment = false;
3795 let mut value_to_self_msat_diff: i64 = 0;
3798 // Take references explicitly so that we can hold multiple references to self.context.
3799 let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
3800 let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;
3801 let expecting_peer_commitment_signed = &mut self.context.expecting_peer_commitment_signed;
3803 // We really shouldn't have two passes here, but retain gives a non-mutable ref (Rust bug)
3804 pending_inbound_htlcs.retain(|htlc| {
3805 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
3806 log_trace!(logger, " ...removing inbound LocalRemoved {}", &htlc.payment_hash);
3807 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
3808 value_to_self_msat_diff += htlc.amount_msat as i64;
3810 *expecting_peer_commitment_signed = true;
3814 pending_outbound_htlcs.retain(|htlc| {
3815 if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
3816 log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", &htlc.payment_hash);
3817 if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
3818 revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
3820 finalized_claimed_htlcs.push(htlc.source.clone());
3821 // They fulfilled, so we sent them money
3822 value_to_self_msat_diff -= htlc.amount_msat as i64;
3827 for htlc in pending_inbound_htlcs.iter_mut() {
3828 let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
3830 } else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
3834 let mut state = InboundHTLCState::Committed;
3835 mem::swap(&mut state, &mut htlc.state);
3837 if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
3838 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
3839 htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
3840 require_commitment = true;
3841 } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
3842 match forward_info {
3843 PendingHTLCStatus::Fail(fail_msg) => {
3844 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash);
3845 require_commitment = true;
3847 HTLCFailureMsg::Relay(msg) => {
3848 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
3849 update_fail_htlcs.push(msg)
3851 HTLCFailureMsg::Malformed(msg) => {
3852 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
3853 update_fail_malformed_htlcs.push(msg)
3857 PendingHTLCStatus::Forward(forward_info) => {
3858 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash);
3859 to_forward_infos.push((forward_info, htlc.htlc_id));
3860 htlc.state = InboundHTLCState::Committed;
3866 for htlc in pending_outbound_htlcs.iter_mut() {
3867 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
3868 log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", &htlc.payment_hash);
3869 htlc.state = OutboundHTLCState::Committed;
3870 *expecting_peer_commitment_signed = true;
3872 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
3873 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
3874 // Grab the preimage, if it exists, instead of cloning
3875 let mut reason = OutboundHTLCOutcome::Success(None);
3876 mem::swap(outcome, &mut reason);
3877 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
3878 require_commitment = true;
3882 self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;
3884 if let Some((feerate, update_state)) = self.context.pending_update_fee {
3885 match update_state {
3886 FeeUpdateState::Outbound => {
3887 debug_assert!(self.context.is_outbound());
3888 log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
3889 self.context.feerate_per_kw = feerate;
3890 self.context.pending_update_fee = None;
3891 self.context.expecting_peer_commitment_signed = true;
3893 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); },
3894 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
3895 debug_assert!(!self.context.is_outbound());
3896 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
3897 require_commitment = true;
3898 self.context.feerate_per_kw = feerate;
3899 self.context.pending_update_fee = None;
3904 let release_monitor = self.context.blocked_monitor_updates.is_empty() && !hold_mon_update;
3905 let release_state_str =
3906 if hold_mon_update { "Holding" } else if release_monitor { "Releasing" } else { "Blocked" };
3907 macro_rules! return_with_htlcs_to_fail {
3908 ($htlcs_to_fail: expr) => {
3909 if !release_monitor {
3910 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
3911 update: monitor_update,
3913 return Ok(($htlcs_to_fail, None));
3915 return Ok(($htlcs_to_fail, Some(monitor_update)));
3920 if self.context.channel_state.is_monitor_update_in_progress() {
3921 // We can't actually generate a new commitment transaction (including by freeing holding
3922 // cells) while we can't update the monitor, so we just return what we have.
3923 if require_commitment {
3924 self.context.monitor_pending_commitment_signed = true;
3925 // When the monitor updating is restored we'll call
3926 // get_last_commitment_update_for_send(), which does not update state, but we're
3927 // definitely now awaiting a remote revoke before we can step forward any more, so set it here.
3929 let mut additional_update = self.build_commitment_no_status_check(logger);
3930 // build_commitment_no_status_check may bump latest_monitor_update_id but we want them to be
3931 // strictly increasing by one, so reset it here.
3932 self.context.latest_monitor_update_id = monitor_update.update_id;
3933 monitor_update.updates.append(&mut additional_update.updates);
3935 self.context.monitor_pending_forwards.append(&mut to_forward_infos);
3936 self.context.monitor_pending_failures.append(&mut revoked_htlcs);
3937 self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
3938 log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", &self.context.channel_id());
3939 return_with_htlcs_to_fail!(Vec::new());
3942 match self.free_holding_cell_htlcs(fee_estimator, logger) {
3943 (Some(mut additional_update), htlcs_to_fail) => {
3944 // free_holding_cell_htlcs may bump latest_monitor_update_id multiple times but we want them to be
3945 // strictly increasing by one, so reset it here.
3946 self.context.latest_monitor_update_id = monitor_update.update_id;
3947 monitor_update.updates.append(&mut additional_update.updates);
3949 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with holding cell HTLCs freed. {} monitor update.",
3950 &self.context.channel_id(), release_state_str);
3952 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3953 return_with_htlcs_to_fail!(htlcs_to_fail);
3955 (None, htlcs_to_fail) => {
3956 if require_commitment {
3957 let mut additional_update = self.build_commitment_no_status_check(logger);
3959 // build_commitment_no_status_check may bump latest_monitor_update_id but we want them to be
3960 // strictly increasing by one, so reset it here.
3961 self.context.latest_monitor_update_id = monitor_update.update_id;
3962 monitor_update.updates.append(&mut additional_update.updates);
3964 log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed. {} monitor update.",
3965 &self.context.channel_id(),
3966 update_fail_htlcs.len() + update_fail_malformed_htlcs.len(),
3969 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3970 return_with_htlcs_to_fail!(htlcs_to_fail);
3972 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary. {} monitor update.",
3973 &self.context.channel_id(), release_state_str);
3975 self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3976 return_with_htlcs_to_fail!(htlcs_to_fail);
3982 /// Queues up an outbound update fee by placing it in the holding cell. You should call
3983 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
3984 /// commitment update.
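///
/// A hedged usage sketch (caller context and the exact signature of
/// [`Self::maybe_free_holding_cell_htlcs`] are assumed):
/// ```ignore
/// chan.queue_update_fee(new_feerate_per_kw, &fee_estimator, &logger);
/// // later, once pending RAAs and monitor updates have cleared:
/// let _ = chan.maybe_free_holding_cell_htlcs(&fee_estimator, &logger);
/// ```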
3985 pub fn queue_update_fee<F: Deref, L: Deref>(&mut self, feerate_per_kw: u32,
3986 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
3987 where F::Target: FeeEstimator, L::Target: Logger
3989 let msg_opt = self.send_update_fee(feerate_per_kw, true, fee_estimator, logger);
3990 assert!(msg_opt.is_none(), "We forced holding cell?");
3993 /// Adds a pending update to this channel. See the doc for send_htlc for
3994 /// further details on the optionality of the return value.
3995 /// If our balance is too low to cover the cost of the next commitment transaction at the
3996 /// new feerate, the update is cancelled.
3998 /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
3999 /// [`Channel`] if `force_holding_cell` is false.
4000 fn send_update_fee<F: Deref, L: Deref>(
4001 &mut self, feerate_per_kw: u32, mut force_holding_cell: bool,
4002 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
4003 ) -> Option<msgs::UpdateFee>
4004 where F::Target: FeeEstimator, L::Target: Logger
4006 if !self.context.is_outbound() {
4007 panic!("Cannot send fee from inbound channel");
4009 if !self.context.is_usable() {
4010 panic!("Cannot update fee until channel is fully established and we haven't started shutting down");
4012 if !self.context.is_live() {
4013 panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
4016 // Before proposing a feerate update, check that we can actually afford the new fee.
4017 let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
4018 let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
4019 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
4020 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
4021 let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000;
4022 let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
4023 if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
4024 //TODO: auto-close after a number of failures?
4025 log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
4029 // Note that we evaluate the pending HTLC "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
4030 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
4031 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
4032 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
4033 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
4034 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
4037 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
4038 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
4042 if self.context.channel_state.is_awaiting_remote_revoke() || self.context.channel_state.is_monitor_update_in_progress() {
4043 force_holding_cell = true;
4046 if force_holding_cell {
4047 self.context.holding_cell_update_fee = Some(feerate_per_kw);
4051 debug_assert!(self.context.pending_update_fee.is_none());
4052 self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));
4054 Some(msgs::UpdateFee {
4055 channel_id: self.context.channel_id,
4060 /// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
4061 /// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be resent.
4063 /// No further message handling calls may be made until a channel_reestablish dance has completed.
4065 /// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately.
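///
/// A hedged disconnect-time sketch (handler context assumed):
/// ```ignore
/// if chan.remove_uncommitted_htlcs_and_mark_paused(&logger).is_err() {
/// 	// still pre-funding: force_shutdown and discard the channel
/// }
/// // no further message handling until a channel_reestablish dance completes
/// ```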
4066 pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger {
4067 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
4068 if self.context.channel_state.is_pre_funded_state() {
4072 if self.context.channel_state.is_peer_disconnected() {
4073 // While the below code should be idempotent, it's simpler to just return early, as
4074 // redundant disconnect events can fire, though they should be rare.
4078 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
4079 self.context.announcement_sigs_state = AnnouncementSigsState::NotSent;
4082 // Upon reconnect we have to start the closing_signed dance over, but shutdown messages
4083 // will be retransmitted.
4084 self.context.last_sent_closing_fee = None;
4085 self.context.pending_counterparty_closing_signed = None;
4086 self.context.closing_fee_limits = None;
4088 let mut inbound_drop_count = 0;
4089 self.context.pending_inbound_htlcs.retain(|htlc| {
4091 InboundHTLCState::RemoteAnnounced(_) => {
4092 // They sent us an update_add_htlc but we never got the commitment_signed.
4093 // We'll tell them what commitment_signed we're expecting next and they'll drop
4094 // this HTLC accordingly
4095 inbound_drop_count += 1;
4098 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
4099 // We received a commitment_signed updating this HTLC and (at least hopefully)
4100 // sent a revoke_and_ack (which we can re-transmit) and have heard nothing
4101 // in response to it yet, so don't touch it.
4104 InboundHTLCState::Committed => true,
4105 InboundHTLCState::LocalRemoved(_) => {
4106 // We (hopefully) sent a commitment_signed updating this HTLC (which we can
4107 // re-transmit if needed) and they may have even sent a revoke_and_ack back
4108 // (that we missed). Keep this around for now and if they tell us they missed
4109 // the commitment_signed we can re-transmit the update then.
4114 self.context.next_counterparty_htlc_id -= inbound_drop_count;
4116 if let Some((_, update_state)) = self.context.pending_update_fee {
4117 if update_state == FeeUpdateState::RemoteAnnounced {
4118 debug_assert!(!self.context.is_outbound());
4119 self.context.pending_update_fee = None;
4123 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
4124 if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
4125 // They sent us an update to remove this but haven't yet sent the corresponding
4126 // commitment_signed, we need to move it back to Committed and they can re-send
4127 // the update upon reconnection.
4128 htlc.state = OutboundHTLCState::Committed;
4132 self.context.sent_message_awaiting_response = None;
4134 self.context.channel_state.set_peer_disconnected();
4135 log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id());
4139 /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
4140 /// This must be called before we return the [`ChannelMonitorUpdate`] back to the
4141 /// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
4142 /// update completes (potentially immediately).
4143 /// The messages which were generated with the monitor update must *not* have been sent to the
4144 /// remote end, and must instead have been dropped. They will be regenerated when
4145 /// [`Self::monitor_updating_restored`] is called.
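///
/// A hedged illustration, mirroring the holding-cell path above: pause while
/// requesting that only a commitment_signed be resent on restore:
/// ```ignore
/// self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
/// ```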
4147 /// [`ChannelManager`]: super::channelmanager::ChannelManager
4148 /// [`chain::Watch`]: crate::chain::Watch
4149 /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
4150 fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
4151 resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
4152 mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
4153 mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
4155 self.context.monitor_pending_revoke_and_ack |= resend_raa;
4156 self.context.monitor_pending_commitment_signed |= resend_commitment;
4157 self.context.monitor_pending_channel_ready |= resend_channel_ready;
4158 self.context.monitor_pending_forwards.append(&mut pending_forwards);
4159 self.context.monitor_pending_failures.append(&mut pending_fails);
4160 self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
4161 self.context.channel_state.set_monitor_update_in_progress();
4164 /// Indicates that the latest ChannelMonitor update has been committed by the client
4165 /// successfully and we should restore normal operation. Returns messages which should be sent
4166 /// to the remote side.
4167 pub fn monitor_updating_restored<L: Deref, NS: Deref>(
4168 &mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash,
4169 user_config: &UserConfig, best_block_height: u32
4170 ) -> MonitorRestoreUpdates
4173 NS::Target: NodeSigner
4175 assert!(self.context.channel_state.is_monitor_update_in_progress());
4176 self.context.channel_state.clear_monitor_update_in_progress();
4178 // If we're past (or at) the AwaitingChannelReady stage on an outbound channel, try to
4179 // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
4180 // first received the funding_signed.
4181 let mut funding_broadcastable =
4182 if self.context.is_outbound() &&
4183 matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH)) ||
4184 matches!(self.context.channel_state, ChannelState::ChannelReady(_))
4186 self.context.funding_transaction.take()
4188 // That said, if the funding transaction is already confirmed (ie we're active with a
4189 // minimum_depth over 0), don't bother re-broadcasting the confirmed funding tx.
4190 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && self.context.minimum_depth != Some(0) {
4191 funding_broadcastable = None;
4194 // We will never broadcast the funding transaction when we're in MonitorUpdateInProgress
4195 // (and we assume the user never directly broadcasts the funding transaction and waits for
4196 // us to do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
4197 // * an inbound channel that failed to persist the monitor on funding_created and we got
4198 // the funding transaction confirmed before the monitor was persisted, or
4199 // * a 0-conf channel where we intended to send the channel_ready before any broadcast at all.
4200 let channel_ready = if self.context.monitor_pending_channel_ready {
4201 assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
4202 "Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
4203 self.context.monitor_pending_channel_ready = false;
4204 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4205 Some(msgs::ChannelReady {
4206 channel_id: self.context.channel_id(),
4207 next_per_commitment_point,
4208 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4212 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger);
4214 let mut accepted_htlcs = Vec::new();
4215 mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards);
4216 let mut failed_htlcs = Vec::new();
4217 mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
4218 let mut finalized_claimed_htlcs = Vec::new();
4219 mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);
4221 if self.context.channel_state.is_peer_disconnected() {
4222 self.context.monitor_pending_revoke_and_ack = false;
4223 self.context.monitor_pending_commitment_signed = false;
4224 return MonitorRestoreUpdates {
4225 raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
4226 accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
4230 let raa = if self.context.monitor_pending_revoke_and_ack {
4231 Some(self.get_last_revoke_and_ack())
4233 let commitment_update = if self.context.monitor_pending_commitment_signed {
4234 self.get_last_commitment_update_for_send(logger).ok()
4236 if commitment_update.is_some() {
4237 self.mark_awaiting_response();
4240 self.context.monitor_pending_revoke_and_ack = false;
4241 self.context.monitor_pending_commitment_signed = false;
4242 let order = self.context.resend_order.clone();
4243 log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
4244 &self.context.channel_id(), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
4245 if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
4246 match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
4247 MonitorRestoreUpdates {
4248 raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
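// A hedged consumer sketch (plumbing assumed): the messages carried in a
// `MonitorRestoreUpdates` must be sent respecting its `order` field:
//
//     match updates.order {
//         RAACommitmentOrder::CommitmentFirst => {
//             // send commitment_update (if any), then raa (if any)
//         }
//         RAACommitmentOrder::RevokeAndACKFirst => {
//             // send raa (if any), then commitment_update (if any)
//         }
//     }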
4252 pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
4253 where F::Target: FeeEstimator, L::Target: Logger
4255 if self.context.is_outbound() {
4256 return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
4258 if self.context.channel_state.is_peer_disconnected() {
4259 return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
4261 Channel::<SP>::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
4263 self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
4264 self.context.update_time_counter += 1;
4265 // Check that we won't be pushed over our dust exposure limit by the feerate increase.
4266 if !self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
4267 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
4268 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
4269 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
4270 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
4271 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
4272 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
4273 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
4274 msg.feerate_per_kw, holder_tx_dust_exposure)));
4276 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
4277 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
4278 msg.feerate_per_kw, counterparty_tx_dust_exposure)));
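// A hedged arithmetic example for the checks above (illustrative numbers
// only): with max_dust_htlc_exposure_msat = 5_000_000 and a combined
// dust-exposed HTLC total of 5_500_000 msat at the proposed feerate, the
// update_fee is rejected with a Close error rather than accepting unclaimable
// dust beyond our configured exposure.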
4284 /// Indicates that the signer may have some signatures for us, so we should retry if we're blocked.
4286 #[cfg(async_signing)]
4287 pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger {
4288 let commitment_update = if self.context.signer_pending_commitment_update {
4289 self.get_last_commitment_update_for_send(logger).ok()
4291 let funding_signed = if self.context.signer_pending_funding && !self.context.is_outbound() {
4292 self.context.get_funding_signed_msg(logger).1
4294 let channel_ready = if funding_signed.is_some() {
4295 self.check_get_channel_ready(0)
4298 log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed and {} channel_ready",
4299 if commitment_update.is_some() { "a" } else { "no" },
4300 if funding_signed.is_some() { "a" } else { "no" },
4301 if channel_ready.is_some() { "a" } else { "no" });
4303 SignerResumeUpdates {
4310 fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
4311 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4312 let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
4313 msgs::RevokeAndACK {
4314 channel_id: self.context.channel_id,
4315 per_commitment_secret,
4316 next_per_commitment_point,
4318 next_local_nonce: None,
4322 /// Gets the last commitment update for immediate sending to our peer.
4323 fn get_last_commitment_update_for_send<L: Deref>(&mut self, logger: &L) -> Result<msgs::CommitmentUpdate, ()> where L::Target: Logger {
4324 let mut update_add_htlcs = Vec::new();
4325 let mut update_fulfill_htlcs = Vec::new();
4326 let mut update_fail_htlcs = Vec::new();
4327 let mut update_fail_malformed_htlcs = Vec::new();
4329 for htlc in self.context.pending_outbound_htlcs.iter() {
4330 if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
4331 update_add_htlcs.push(msgs::UpdateAddHTLC {
4332 channel_id: self.context.channel_id(),
4333 htlc_id: htlc.htlc_id,
4334 amount_msat: htlc.amount_msat,
4335 payment_hash: htlc.payment_hash,
4336 cltv_expiry: htlc.cltv_expiry,
4337 onion_routing_packet: (**onion_packet).clone(),
4338 skimmed_fee_msat: htlc.skimmed_fee_msat,
4339 blinding_point: htlc.blinding_point,
4344 for htlc in self.context.pending_inbound_htlcs.iter() {
4345 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
4347 &InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
4348 update_fail_htlcs.push(msgs::UpdateFailHTLC {
4349 channel_id: self.context.channel_id(),
4350 htlc_id: htlc.htlc_id,
4351 reason: err_packet.clone()
4354 &InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
4355 update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
4356 channel_id: self.context.channel_id(),
4357 htlc_id: htlc.htlc_id,
4358 sha256_of_onion: sha256_of_onion.clone(),
4359 failure_code: failure_code.clone(),
4362 &InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
4363 update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
4364 channel_id: self.context.channel_id(),
4365 htlc_id: htlc.htlc_id,
4366 payment_preimage: payment_preimage.clone(),
4373 let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() {
4374 Some(msgs::UpdateFee {
4375 channel_id: self.context.channel_id(),
4376 feerate_per_kw: self.context.pending_update_fee.unwrap().0,
4380 log_trace!(logger, "Regenerating latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
4381 &self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" },
4382 update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
4383 let commitment_signed = if let Ok(update) = self.send_commitment_no_state_update(logger).map(|(cu, _)| cu) {
4384 if self.context.signer_pending_commitment_update {
4385 log_trace!(logger, "Commitment update generated: clearing signer_pending_commitment_update");
4386 self.context.signer_pending_commitment_update = false;
4390 #[cfg(not(async_signing))] {
4391 panic!("Failed to get signature for new commitment state");
4393 #[cfg(async_signing)] {
4394 if !self.context.signer_pending_commitment_update {
4395 log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
4396 self.context.signer_pending_commitment_update = true;
4401 Ok(msgs::CommitmentUpdate {
4402 update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
4407 /// Gets the `Shutdown` message we should send our peer on reconnect, if any.
4408 pub fn get_outbound_shutdown(&self) -> Option<msgs::Shutdown> {
4409 if self.context.channel_state.is_local_shutdown_sent() {
4410 assert!(self.context.shutdown_scriptpubkey.is_some());
4411 Some(msgs::Shutdown {
4412 channel_id: self.context.channel_id,
4413 scriptpubkey: self.get_closing_scriptpubkey(),
4418 /// May panic if some calls other than message-handling calls (which will all Err immediately)
4419 /// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
4421 /// Some links printed in log lines are included here to check them during build (when run with
4422 /// `cargo doc --document-private-items`):
4423 /// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
4424 /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
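///
/// A hedged sketch of the number bookkeeping used below: holder commitment
/// numbers count *down* from `INITIAL_COMMITMENT_NUMBER`, while the wire-level
/// "next commitment number" counts *up*, hence conversions such as:
/// ```ignore
/// let our_commitment_transaction =
/// 	INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
/// ```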
4425 pub fn channel_reestablish<L: Deref, NS: Deref>(
4426 &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
4427 chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock
4428 ) -> Result<ReestablishResponses, ChannelError>
4431 NS::Target: NodeSigner
4433 if !self.context.channel_state.is_peer_disconnected() {
4434 // While BOLT 2 doesn't indicate explicitly we should error this channel here, it
4435 // almost certainly indicates we are going to end up out-of-sync in some way, so we
4436 // just close here instead of trying to recover.
4437 return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
4440 if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
4441 msg.next_local_commitment_number == 0 {
4442 return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned()));
4445 let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
4446 if msg.next_remote_commitment_number > 0 {
4447 let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
4448 let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
4449 .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
4450 if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
4451 return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
4453 if msg.next_remote_commitment_number > our_commitment_transaction {
4454 macro_rules! log_and_panic {
4455 ($err_msg: expr) => {
4456 log_error!(logger, $err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4457 panic!($err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4460 log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
4461 This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
4462 More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
4463 If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
4464 ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
4465 ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
4466 Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
4467 See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
4471 // Before we change the state of the channel, we check if the peer is sending a very old
4472 // commitment transaction number; if so, we send a warning message.
4473 if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
4474 return Err(ChannelError::Warn(format!(
4475 "Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)",
4476 msg.next_remote_commitment_number,
4477 our_commitment_transaction
4481 // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
4482 // remaining cases either succeed or ErrorMessage-fail).
4483 self.context.channel_state.clear_peer_disconnected();
4484 self.context.sent_message_awaiting_response = None;
4486 let shutdown_msg = self.get_outbound_shutdown();
4488 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger);
4490 if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(_)) {
4491 // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
4492 if !self.context.channel_state.is_our_channel_ready() ||
4493 self.context.channel_state.is_monitor_update_in_progress() {
4494 if msg.next_remote_commitment_number != 0 {
4495 return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
4497 // Short circuit the whole handler as there is nothing we can resend them
4498 return Ok(ReestablishResponses {
4499 channel_ready: None,
4500 raa: None, commitment_update: None,
4501 order: RAACommitmentOrder::CommitmentFirst,
4502 shutdown_msg, announcement_sigs,
4506 // We have OurChannelReady set!
4507 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4508 return Ok(ReestablishResponses {
4509 channel_ready: Some(msgs::ChannelReady {
4510 channel_id: self.context.channel_id(),
4511 next_per_commitment_point,
4512 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4514 raa: None, commitment_update: None,
4515 order: RAACommitmentOrder::CommitmentFirst,
4516 shutdown_msg, announcement_sigs,
4520 let required_revoke = if msg.next_remote_commitment_number == our_commitment_transaction {
4521 // Remote isn't waiting on any RevokeAndACK from us!
4522 // Note that if we need to repeat our ChannelReady we'll do that in the next if block.
4524 } else if msg.next_remote_commitment_number + 1 == our_commitment_transaction {
4525 if self.context.channel_state.is_monitor_update_in_progress() {
4526 self.context.monitor_pending_revoke_and_ack = true;
4529 Some(self.get_last_revoke_and_ack())
4532 debug_assert!(false, "All values should have been handled in the four cases above");
4533 return Err(ChannelError::Close(format!(
4534 "Peer attempted to reestablish channel expecting a future local commitment transaction: {} (received) vs {} (expected)",
4535 msg.next_remote_commitment_number,
4536 our_commitment_transaction
4540 // We increment cur_counterparty_commitment_transaction_number only upon receipt of
4541 // revoke_and_ack, not on sending commitment_signed, so we add one if we have
4542 // AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
4543 // the corresponding revoke_and_ack back yet.
4544 let is_awaiting_remote_revoke = self.context.channel_state.is_awaiting_remote_revoke();
4545 if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
4546 self.mark_awaiting_response();
4548 let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };
4550 let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 {
4551 // We should never have to worry about MonitorUpdateInProgress resending ChannelReady
4552 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4553 Some(msgs::ChannelReady {
4554 channel_id: self.context.channel_id(),
4555 next_per_commitment_point,
4556 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4560 if msg.next_local_commitment_number == next_counterparty_commitment_number {
4561 if required_revoke.is_some() {
4562 log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id());
4564 log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id());
4567 Ok(ReestablishResponses {
4568 channel_ready, shutdown_msg, announcement_sigs,
4569 raa: required_revoke,
4570 commitment_update: None,
4571 order: self.context.resend_order.clone(),
4573 } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
4574 if required_revoke.is_some() {
4575 log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id());
4577 log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id());
4580 if self.context.channel_state.is_monitor_update_in_progress() {
4581 self.context.monitor_pending_commitment_signed = true;
4582 Ok(ReestablishResponses {
4583 channel_ready, shutdown_msg, announcement_sigs,
4584 commitment_update: None, raa: None,
4585 order: self.context.resend_order.clone(),
4588 Ok(ReestablishResponses {
4589 channel_ready, shutdown_msg, announcement_sigs,
4590 raa: required_revoke,
4591 commitment_update: self.get_last_commitment_update_for_send(logger).ok(),
4592 order: self.context.resend_order.clone(),
4595 } else if msg.next_local_commitment_number < next_counterparty_commitment_number {
4596 Err(ChannelError::Close(format!(
4597 "Peer attempted to reestablish channel with a very old remote commitment transaction: {} (received) vs {} (expected)",
4598 msg.next_local_commitment_number,
4599 next_counterparty_commitment_number,
4602 Err(ChannelError::Close(format!(
4603 "Peer attempted to reestablish channel with a future remote commitment transaction: {} (received) vs {} (expected)",
4604 msg.next_local_commitment_number,
4605 next_counterparty_commitment_number,
4610 /// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole
4611 /// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart,
4612 /// at which point they will be recalculated.
4613 fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>)
4615 where F::Target: FeeEstimator
4617 if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); }
4619 // Propose a range from our current ChannelCloseMinimum feerate to our NonAnchorChannelFee
4620 // feerate plus our force_close_avoidance_max_fee_satoshis.
4621 // If we fail to come to consensus, we'll have to force-close.
4622 let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::ChannelCloseMinimum);
4623 // Use NonAnchorChannelFee because this should be an estimate for a channel close
4624 // that we don't expect to need fee bumping.
4625 let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
4626 let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() };
4628 // The spec requires that (when the channel does not have anchors) we only send absolute
4629 // channel fees no greater than the absolute channel fee on the current commitment
4630 // transaction. It's unclear *which* commitment transaction this refers to, and there isn't
4631 // a very good reason to apply such a limit in any case. We don't bother doing so, risking
4632 // some force-closure by old nodes, but we wanted to close the channel anyway.
4634 if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw {
4635 let min_feerate = if self.context.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) };
4636 proposed_feerate = cmp::max(proposed_feerate, min_feerate);
4637 proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate);
4640 // Note that technically we could end up with a lower minimum fee if one side's balance is
4641 // below our dust limit, causing the output to disappear. We don't bother handling this
4642 // case, however, as this should only happen if a channel is closed before any (material)
4643 // payments have been made on it. This may cause slight fee overpayment and/or failure to
4644 // come to consensus with our counterparty on appropriate fees, however it should be a
4645 // relatively rare case. We can revisit this later, though note that in order to determine
4646 // if the funder's output is dust, we have to know the absolute fee we're going to use.
4647 let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
4648 let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
4649 let proposed_max_total_fee_satoshis = if self.context.is_outbound() {
4650 // We always add force_close_avoidance_max_fee_satoshis to our normal
4651 // feerate-calculated fee, but allow the max to be overridden if we're using a
4652 // target feerate-calculated fee.
4653 cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.context.config.options.force_close_avoidance_max_fee_satoshis,
4654 proposed_max_feerate as u64 * tx_weight / 1000)
4656 self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000
4659 self.context.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis));
4660 self.context.closing_fee_limits.clone().unwrap()
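// A hedged arithmetic example for the limits above (illustrative numbers
// only): at a minimum feerate of 253 sat/kW and a closing transaction weight
// of 700 WU, the initial proposal is 253 * 700 / 1000 = 177 sats; for the
// funder, the maximum adds force_close_avoidance_max_fee_satoshis on top of
// the NonAnchorChannelFee-derived fee.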
4663 /// Returns true if we're ready to commence the closing_signed negotiation phase. This is true
4664 /// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At
4665 /// this point if we're the funder we should send the initial closing_signed, and in any case
4666 /// shutdown should complete within a reasonable timeframe.
4667 fn closing_negotiation_ready(&self) -> bool {
4668 self.context.closing_negotiation_ready()
4671 /// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
4672 /// an Err if no progress is being made and the channel should be force-closed instead.
4673 /// Should be called on a one-minute timer.
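///
/// A hedged timer-driven sketch (caller context assumed):
/// ```ignore
/// // roughly once per minute:
/// if let Err(e) = chan.timer_check_closing_negotiation_progress() {
/// 	// negotiation stalled for two ticks; force-close the channel with `e`
/// }
/// ```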
4674 pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
4675 if self.closing_negotiation_ready() {
4676 if self.context.closing_signed_in_flight {
4677 return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
4679 self.context.closing_signed_in_flight = true;
4685 pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
4686 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
4687 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
4688 where F::Target: FeeEstimator, L::Target: Logger
4690 // If we're waiting on a monitor persistence, that implies we're also waiting to send some
4691 // message to our counterparty (probably a `revoke_and_ack`). In such a case, we shouldn't
4692 // initiate `closing_signed` negotiation until we're clear of all pending messages. Note
4693 // that closing_negotiation_ready checks this case (as well as a few others).
4694 if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
4695 return Ok((None, None, None));
4698 if !self.context.is_outbound() {
4699 if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() {
4700 return self.closing_signed(fee_estimator, &msg);
4702 return Ok((None, None, None));
4705 // If we're waiting on a counterparty `commitment_signed` to clear some updates from our
4706 // local commitment transaction, we can't yet initiate `closing_signed` negotiation.
4707 if self.context.expecting_peer_commitment_signed {
4708 return Ok((None, None, None));
4711 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
4713 assert!(self.context.shutdown_scriptpubkey.is_some());
4714 let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false);
4715 log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)",
4716 our_min_fee, our_max_fee, total_fee_satoshis);
4718 match &self.context.holder_signer {
4719 ChannelSignerType::Ecdsa(ecdsa) => {
let sig = ecdsa
4721 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
4722 .map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;
4724 self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
4725 Ok((Some(msgs::ClosingSigned {
4726 channel_id: self.context.channel_id,
4727 fee_satoshis: total_fee_satoshis,
4729 fee_range: Some(msgs::ClosingSignedFeeRange {
4730 min_fee_satoshis: our_min_fee,
4731 max_fee_satoshis: our_max_fee,
4735 // TODO (taproot|arik)
4741 // Marks a channel as waiting for a response from the counterparty. If it's not received
4742 // [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] after sending our own to them, then we'll attempt a reconnect.
4744 fn mark_awaiting_response(&mut self) {
4745 self.context.sent_message_awaiting_response = Some(0);
4748 /// Determines whether we should disconnect the counterparty due to not receiving a response
4749 /// within our expected timeframe.
4751 /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
4752 pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool {
4753 let ticks_elapsed = if let Some(ticks_elapsed) = self.context.sent_message_awaiting_response.as_mut() {
4756 // Don't disconnect when we're not waiting on a response.
4759 *ticks_elapsed += 1;
4760 *ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
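// A hedged timer-driven sketch (caller context assumed):
//
//     if chan.should_disconnect_peer_awaiting_response() {
//         // disconnect: no response within DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
//     }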
pub fn shutdown(
4764 &mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
4765 ) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
4767 if self.context.channel_state.is_peer_disconnected() {
4768 return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
4770 if self.context.channel_state.is_pre_funded_state() {
4771 // Spec says we should fail the connection, not the channel, but that's nonsense: there
4772 // are plenty of reasons you may want to fail a channel pre-funding, and spec says you
4773 // can do that via error message without getting a connection fail anyway...
4774 return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
4776 for htlc in self.context.pending_inbound_htlcs.iter() {
4777 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
4778 return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
4781 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
4783 if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
4784 return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_hex_string())));
4787 if self.context.counterparty_shutdown_scriptpubkey.is_some() {
4788 if Some(&msg.scriptpubkey) != self.context.counterparty_shutdown_scriptpubkey.as_ref() {
4789 return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_hex_string())));
4792 self.context.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
4795 // If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
4796 // immediately after the commitment dance, but we can send a Shutdown because we won't send
4797 // any further commitment updates after we set LocalShutdownSent.
4798 let send_shutdown = !self.context.channel_state.is_local_shutdown_sent();
4800 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
4801 Some(_) => false,
4802 None => {
4803 assert!(send_shutdown);
4804 let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
4805 Ok(scriptpubkey) => scriptpubkey,
4806 Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
4807 };
4808 if !shutdown_scriptpubkey.is_compatible(their_features) {
4809 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
4810 }
4811 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
4812 true
4813 },
4814 };
4816 // From here on out, we may not fail!
4818 self.context.channel_state.set_remote_shutdown_sent();
4819 self.context.update_time_counter += 1;
4821 let monitor_update = if update_shutdown_script {
4822 self.context.latest_monitor_update_id += 1;
4823 let monitor_update = ChannelMonitorUpdate {
4824 update_id: self.context.latest_monitor_update_id,
4825 counterparty_node_id: Some(self.context.counterparty_node_id),
4826 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
4827 scriptpubkey: self.get_closing_scriptpubkey(),
4828 }],
4829 };
4830 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
4831 self.push_ret_blockable_mon_update(monitor_update)
4832 } else { None };
4833 let shutdown = if send_shutdown {
4834 Some(msgs::Shutdown {
4835 channel_id: self.context.channel_id,
4836 scriptpubkey: self.get_closing_scriptpubkey(),
4837 })
4838 } else { None };
4840 // We can't send our shutdown until we've committed all of our pending HTLCs, but the
4841 // remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
4842 // cell HTLCs and return them to fail the payment.
4843 self.context.holding_cell_update_fee = None;
4844 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
4845 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
4846 match htlc_update {
4847 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
4848 dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
4849 false
4850 },
4851 _ => true
4852 }
4853 });
4855 self.context.channel_state.set_local_shutdown_sent();
4856 self.context.update_time_counter += 1;
4858 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
4859 }
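// A minimal sketch of consuming the triple returned by `shutdown` above
// (`fail_htlc_backwards` is a hypothetical stand-in for the caller's actual
// HTLC-failure plumbing):
//
//   let (shutdown_opt, monitor_update_opt, dropped_htlcs) =
//       channel.shutdown(&signer_provider, &peer_init_features, &shutdown_msg)?;
//   for (htlc_source, payment_hash) in dropped_htlcs {
//       // Every holding-cell HTLC we dropped must be failed back to its origin.
//       fail_htlc_backwards(htlc_source, payment_hash);
//   }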
4861 fn build_signed_closing_transaction(&self, closing_tx: &ClosingTransaction, counterparty_sig: &Signature, sig: &Signature) -> Transaction {
4862 let mut tx = closing_tx.trust().built_transaction().clone();
4864 tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
4866 let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize();
4867 let counterparty_funding_key = self.context.counterparty_funding_pubkey().serialize();
4868 let mut holder_sig = sig.serialize_der().to_vec();
4869 holder_sig.push(EcdsaSighashType::All as u8);
4870 let mut cp_sig = counterparty_sig.serialize_der().to_vec();
4871 cp_sig.push(EcdsaSighashType::All as u8);
4872 if funding_key[..] < counterparty_funding_key[..] {
4873 tx.input[0].witness.push(holder_sig);
4874 tx.input[0].witness.push(cp_sig);
4875 } else {
4876 tx.input[0].witness.push(cp_sig);
4877 tx.input[0].witness.push(holder_sig);
4878 }
4880 tx.input[0].witness.push(self.context.get_funding_redeemscript().into_bytes());
4882 tx
4883 }
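// The witness assembled above follows the BOLT 3 cooperative-close layout: an
// empty element for the CHECKMULTISIG off-by-one dummy, the two signatures
// ordered by lexicographic comparison of the serialized funding pubkeys, and
// finally the funding redeemscript itself. Schematically:
//
//   witness = [ <>, <sig of lesser pubkey>, <sig of greater pubkey>, <redeemscript> ]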
4884 pub fn closing_signed<F: Deref>(
4885 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned)
4886 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
4887 where F::Target: FeeEstimator
4888 {
4889 if !self.context.channel_state.is_both_sides_shutdown() {
4890 return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
4891 }
4892 if self.context.channel_state.is_peer_disconnected() {
4893 return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
4894 }
4895 if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
4896 return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
4897 }
4898 if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
4899 return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
4900 }
4902 if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() {
4903 return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
4904 }
4906 if self.context.channel_state.is_monitor_update_in_progress() {
4907 self.context.pending_counterparty_closing_signed = Some(msg.clone());
4908 return Ok((None, None, None));
4909 }
4911 let funding_redeemscript = self.context.get_funding_redeemscript();
4912 let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
4913 if used_total_fee != msg.fee_satoshis {
4914 return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
4916 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
4918 match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
4919 Ok(_) => {},
4920 Err(_) => {
4921 // The remote end may have decided to revoke their output due to inconsistent dust
4922 // limits, so check for that case by re-checking the signature here.
4923 closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0;
4924 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
4925 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.context.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
4926 },
4927 };
4929 for outp in closing_tx.trust().built_transaction().output.iter() {
4930 if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
4931 return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
4932 }
4933 }
4935 assert!(self.context.shutdown_scriptpubkey.is_some());
4936 if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
4937 if last_fee == msg.fee_satoshis {
4938 let shutdown_result = ShutdownResult {
4939 closure_reason: ClosureReason::CooperativeClosure,
4940 monitor_update: None,
4941 dropped_outbound_htlcs: Vec::new(),
4942 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
4943 channel_id: self.context.channel_id,
4944 user_channel_id: self.context.user_id,
4945 channel_capacity_satoshis: self.context.channel_value_satoshis,
4946 counterparty_node_id: self.context.counterparty_node_id,
4947 unbroadcasted_funding_tx: self.context.unbroadcasted_funding(),
4948 };
4949 let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
4950 self.context.channel_state = ChannelState::ShutdownComplete;
4951 self.context.update_time_counter += 1;
4952 return Ok((None, Some(tx), Some(shutdown_result)));
4953 }
4954 }
4956 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
4958 macro_rules! propose_fee {
4959 ($new_fee: expr) => {
4960 let (closing_tx, used_fee) = if $new_fee == msg.fee_satoshis {
4961 (closing_tx, $new_fee)
4962 } else {
4963 self.build_closing_transaction($new_fee, false)
4964 };
4966 return match &self.context.holder_signer {
4967 ChannelSignerType::Ecdsa(ecdsa) => {
4968 let sig = ecdsa
4969 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
4970 .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
4971 let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
4972 let shutdown_result = ShutdownResult {
4973 closure_reason: ClosureReason::CooperativeClosure,
4974 monitor_update: None,
4975 dropped_outbound_htlcs: Vec::new(),
4976 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
4977 channel_id: self.context.channel_id,
4978 user_channel_id: self.context.user_id,
4979 channel_capacity_satoshis: self.context.channel_value_satoshis,
4980 counterparty_node_id: self.context.counterparty_node_id,
4981 unbroadcasted_funding_tx: self.context.unbroadcasted_funding(),
4982 };
4983 self.context.channel_state = ChannelState::ShutdownComplete;
4984 self.context.update_time_counter += 1;
4985 let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
4986 (Some(tx), Some(shutdown_result))
4987 } else {
4988 (None, None)
4989 };
4991 self.context.last_sent_closing_fee = Some((used_fee, sig.clone()));
4992 Ok((Some(msgs::ClosingSigned {
4993 channel_id: self.context.channel_id,
4994 fee_satoshis: used_fee,
4995 signature: sig,
4996 fee_range: Some(msgs::ClosingSignedFeeRange {
4997 min_fee_satoshis: our_min_fee,
4998 max_fee_satoshis: our_max_fee,
4999 }),
5000 }), signed_tx, shutdown_result))
5001 },
5002 // TODO (taproot|arik)
5003 #[cfg(taproot)]
5004 _ => todo!()
5005 }
5006 }
5007 }
5009 if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range {
5010 if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis {
5011 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
5012 }
5013 if max_fee_satoshis < our_min_fee {
5014 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee)));
5015 }
5016 if min_fee_satoshis > our_max_fee {
5017 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee)));
5018 }
5020 if !self.context.is_outbound() {
5021 // They have to pay, so pick the highest fee in the overlapping range.
5022 // We should never set an upper bound aside from their full balance
5023 debug_assert_eq!(our_max_fee, self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000);
5024 propose_fee!(cmp::min(max_fee_satoshis, our_max_fee));
5025 } else {
5026 if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee {
5027 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
5028 msg.fee_satoshis, our_min_fee, our_max_fee)));
5029 }
5030 // The proposed fee is in our acceptable range, accept it and broadcast!
5031 propose_fee!(msg.fee_satoshis);
5032 }
5033 } else {
5034 // Old fee style negotiation. We don't bother to enforce whether they are complying
5035 // with the "making progress" requirements, we just comply and hope for the best.
5036 if let Some((last_fee, _)) = self.context.last_sent_closing_fee {
5037 if msg.fee_satoshis > last_fee {
5038 if msg.fee_satoshis < our_max_fee {
5039 propose_fee!(msg.fee_satoshis);
5040 } else if last_fee < our_max_fee {
5041 propose_fee!(our_max_fee);
5042 } else {
5043 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
5044 }
5045 } else {
5046 if msg.fee_satoshis > our_min_fee {
5047 propose_fee!(msg.fee_satoshis);
5048 } else if last_fee > our_min_fee {
5049 propose_fee!(our_min_fee);
5050 } else {
5051 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
5052 }
5053 }
5054 } else {
5055 if msg.fee_satoshis < our_min_fee {
5056 propose_fee!(our_min_fee);
5057 } else if msg.fee_satoshis > our_max_fee {
5058 propose_fee!(our_max_fee);
5059 } else {
5060 propose_fee!(msg.fee_satoshis);
5061 }
5062 }
5063 }
5064 }
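// Worked example with illustrative numbers: suppose our computed range is
// our_min_fee = 1_000 sat and our_max_fee = 5_000 sat. A legacy (no fee_range)
// peer proposing 6_000 sat gets a counter-proposal of our_max_fee = 5_000 sat;
// if they then echo 5_000 sat back, it matches last_sent_closing_fee and the
// branch above builds and returns the fully-signed closing transaction.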
5066 fn internal_htlc_satisfies_config(
5067 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig,
5068 ) -> Result<(), (&'static str, u16)> {
5069 let fee = amt_to_forward.checked_mul(config.forwarding_fee_proportional_millionths as u64)
5070 .and_then(|prop_fee| (prop_fee / 1000000).checked_add(config.forwarding_fee_base_msat as u64));
5071 if fee.is_none() || htlc.amount_msat < fee.unwrap() ||
5072 (htlc.amount_msat - fee.unwrap()) < amt_to_forward {
5073 return Err((
5074 "Prior hop has deviated from specified fees parameters or origin node has obsolete ones",
5075 0x1000 | 12, // fee_insufficient
5076 ));
5077 }
5078 if (htlc.cltv_expiry as u64) < outgoing_cltv_value as u64 + config.cltv_expiry_delta as u64 {
5079 return Err((
5080 "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
5081 0x1000 | 13, // incorrect_cltv_expiry
5082 ));
5083 }
5084 Ok(())
5085 }
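// Worked example (illustrative numbers): with forwarding_fee_base_msat =
// 1_000 and forwarding_fee_proportional_millionths = 100, forwarding
// amt_to_forward = 1_000_000 msat requires a fee of
// 1_000 + 1_000_000 * 100 / 1_000_000 = 1_100 msat, so the incoming HTLC must
// carry at least 1_001_100 msat or the check above fails with 0x1000 | 12.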
5087 /// Determines whether the parameters of an incoming HTLC to be forwarded satisfy the channel's
5088 /// [`ChannelConfig`]. This first looks at the channel's current [`ChannelConfig`], and if
5089 /// unsuccessful, falls back to the previous one if one exists.
5090 pub fn htlc_satisfies_config(
5091 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32,
5092 ) -> Result<(), (&'static str, u16)> {
5093 self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.context.config())
5094 .or_else(|err| {
5095 if let Some(prev_config) = self.context.prev_config() {
5096 self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &prev_config)
5097 } else {
5098 Err(err)
5099 }
5100 })
5101 }
5103 pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
5104 self.context.cur_holder_commitment_transaction_number + 1
5105 }
5107 pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
5108 self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state.is_awaiting_remote_revoke() { 1 } else { 0 }
5109 }
5111 pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
5112 self.context.cur_counterparty_commitment_transaction_number + 2
5113 }
5115 #[cfg(test)]
5116 pub fn get_signer(&self) -> &ChannelSignerType<SP> {
5117 &self.context.holder_signer
5118 }
5120 #[cfg(test)]
5121 pub fn get_value_stat(&self) -> ChannelValueStat {
5122 ChannelValueStat {
5123 value_to_self_msat: self.context.value_to_self_msat,
5124 channel_value_msat: self.context.channel_value_satoshis * 1000,
5125 channel_reserve_msat: self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000,
5126 pending_outbound_htlcs_amount_msat: self.context.pending_outbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
5127 pending_inbound_htlcs_amount_msat: self.context.pending_inbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
5128 holding_cell_outbound_amount_msat: {
5129 let mut res = 0;
5130 for h in self.context.holding_cell_htlc_updates.iter() {
5131 match h {
5132 &HTLCUpdateAwaitingACK::AddHTLC{amount_msat, .. } => {
5133 res += amount_msat;
5134 },
5135 _ => {}
5136 }
5137 }
5138 res
5139 },
5140 counterparty_max_htlc_value_in_flight_msat: self.context.counterparty_max_htlc_value_in_flight_msat,
5141 counterparty_dust_limit_msat: self.context.counterparty_dust_limit_satoshis * 1000,
5142 }
5143 }
5145 /// Returns true if this channel has been marked as awaiting a monitor update to move forward.
5146 /// Allowed in any state (including after shutdown)
5147 pub fn is_awaiting_monitor_update(&self) -> bool {
5148 self.context.channel_state.is_monitor_update_in_progress()
5151 /// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
5152 pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 {
5153 if self.context.blocked_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
5154 self.context.blocked_monitor_updates[0].update.update_id - 1
5157 /// Returns the next blocked monitor update, if one exists, and a bool which indicates a
5158 /// further blocked monitor update exists after the next.
5159 pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(ChannelMonitorUpdate, bool)> {
5160 if self.context.blocked_monitor_updates.is_empty() { return None; }
5161 Some((self.context.blocked_monitor_updates.remove(0).update,
5162 !self.context.blocked_monitor_updates.is_empty()))
5163 }
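// For illustration, a hypothetical caller releasing the next update once its
// blocker resolves (`chain_monitor` and `funding_txo` are assumed context, not
// part of this file):
//
//   if let Some((update, _more_blocked)) = channel.unblock_next_blocked_monitor_update() {
//       chain_monitor.update_channel(funding_txo, &update);
//   }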
5165 /// Pushes a new monitor update into our monitor update queue, returning it if it should be
5166 /// immediately given to the user for persisting or `None` if it should be held as blocked.
5167 fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
5168 -> Option<ChannelMonitorUpdate> {
5169 let release_monitor = self.context.blocked_monitor_updates.is_empty();
5170 if !release_monitor {
5171 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
5172 update,
5173 });
5174 None
5175 } else {
5176 Some(update)
5177 }
5178 }
5180 pub fn blocked_monitor_updates_pending(&self) -> usize {
5181 self.context.blocked_monitor_updates.len()
5182 }
5184 /// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
5185 /// If the channel is outbound, this implies we have not yet broadcasted the funding
5186 /// transaction. If the channel is inbound, this implies simply that the channel has not
5187 /// advanced state.
5188 pub fn is_awaiting_initial_mon_persist(&self) -> bool {
5189 if !self.is_awaiting_monitor_update() { return false; }
5190 if matches!(
5191 self.context.channel_state, ChannelState::AwaitingChannelReady(flags)
5192 if (flags & !(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY | FundedStateFlags::PEER_DISCONNECTED | FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS | AwaitingChannelReadyFlags::WAITING_FOR_BATCH)).is_empty()
5193 ) {
5194 // If we're not a 0conf channel, we'll be waiting on a monitor update with only
5195 // AwaitingChannelReady set, though our peer could have sent their channel_ready.
5196 debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0);
5197 return true;
5198 }
5199 if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
5200 self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
5201 // If we're a 0-conf channel, we'll move beyond AwaitingChannelReady immediately even while
5202 // waiting for the initial monitor persistence. Thus, we check if our commitment
5203 // transaction numbers have both been iterated only exactly once (for the
5204 // funding_signed), and we're awaiting monitor update.
5206 // If we got here, we shouldn't have yet broadcasted the funding transaction (as the
5207 // only way to get an awaiting-monitor-update state during initial funding is if the
5208 // initial monitor persistence is still pending).
5210 // Because deciding we're awaiting initial broadcast spuriously could result in
5211 // funds-loss (as we don't have a monitor, but have the funding transaction confirmed),
5212 // we hard-assert here, even in production builds.
5213 if self.context.is_outbound() { assert!(self.context.funding_transaction.is_some()); }
5214 assert!(self.context.monitor_pending_channel_ready);
5215 assert_eq!(self.context.latest_monitor_update_id, 0);
5216 return true;
5217 }
5218 false
5219 }
5221 /// Returns true if our channel_ready has been sent
5222 pub fn is_our_channel_ready(&self) -> bool {
5223 matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY)) ||
5224 matches!(self.context.channel_state, ChannelState::ChannelReady(_))
5227 /// Returns true if our peer has either initiated or agreed to shut down the channel.
5228 pub fn received_shutdown(&self) -> bool {
5229 self.context.channel_state.is_remote_shutdown_sent()
5232 /// Returns true if we either initiated or agreed to shut down the channel.
5233 pub fn sent_shutdown(&self) -> bool {
5234 self.context.channel_state.is_local_shutdown_sent()
5237 /// Returns true if this channel is fully shut down. True here implies that no further actions
5238 /// may/will be taken on this channel, and thus this object should be freed. Any future changes
5239 /// will be handled appropriately by the chain monitor.
5240 pub fn is_shutdown(&self) -> bool {
5241 matches!(self.context.channel_state, ChannelState::ShutdownComplete)
5244 pub fn channel_update_status(&self) -> ChannelUpdateStatus {
5245 self.context.channel_update_status
5248 pub fn set_channel_update_status(&mut self, status: ChannelUpdateStatus) {
5249 self.context.update_time_counter += 1;
5250 self.context.channel_update_status = status;
5253 fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
5254 // Called:
5255 // * always when a new block/transactions are confirmed with the new height
5256 // * when funding is signed with a height of 0
5257 if self.context.funding_tx_confirmation_height == 0 && self.context.minimum_depth != Some(0) {
5258 return None;
5259 }
5261 let funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
5262 if funding_tx_confirmations <= 0 {
5263 self.context.funding_tx_confirmation_height = 0;
5264 }
5266 if funding_tx_confirmations < self.context.minimum_depth.unwrap_or(0) as i64 {
5267 return None;
5268 }
5270 // If we're still pending the signature on a funding transaction, then we're not ready to send a
5271 // channel_ready yet.
5272 if self.context.signer_pending_funding {
5273 return None;
5274 }
5276 // Note that we don't include ChannelState::WaitingForBatch as we don't want to send
5277 // channel_ready until the entire batch is ready.
5278 let need_commitment_update = if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if (f & !FundedStateFlags::ALL).is_empty()) {
5279 self.context.channel_state.set_our_channel_ready();
5280 true
5281 } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f & !FundedStateFlags::ALL == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY) {
5282 self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
5283 self.context.update_time_counter += 1;
5284 true
5285 } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f & !FundedStateFlags::ALL == AwaitingChannelReadyFlags::OUR_CHANNEL_READY) {
5286 // We got a reorg but not enough to trigger a force close, just ignore.
5287 false
5288 } else {
5289 if self.context.funding_tx_confirmation_height != 0 &&
5290 self.context.channel_state < ChannelState::ChannelReady(ChannelReadyFlags::new())
5291 {
5292 // We should never see a funding transaction on-chain until we've received
5293 // funding_signed (if we're an outbound channel), or seen funding_generated (if we're
5294 // an inbound channel - before that we have no known funding TXID). The fuzzer,
5295 // however, may do this and we shouldn't treat it as a bug.
5296 #[cfg(not(fuzzing))]
5297 panic!("Started confirming a channel in a state pre-AwaitingChannelReady: {}.\n\
5298 Do NOT broadcast a funding transaction manually - let LDK do it for you!",
5299 self.context.channel_state.to_u32());
5300 }
5301 // We got a reorg but not enough to trigger a force close, just ignore.
5302 false
5303 };
5305 if need_commitment_update {
5306 if !self.context.channel_state.is_monitor_update_in_progress() {
5307 if !self.context.channel_state.is_peer_disconnected() {
5308 let next_per_commitment_point =
5309 self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
5310 return Some(msgs::ChannelReady {
5311 channel_id: self.context.channel_id,
5312 next_per_commitment_point,
5313 short_channel_id_alias: Some(self.context.outbound_scid_alias),
5314 });
5315 }
5316 } else {
5317 self.context.monitor_pending_channel_ready = true;
5318 }
5319 }
5320 None
5321 }
5323 /// When a transaction is confirmed, we check whether it is or spends the funding transaction.
5324 /// In the first case, we store the confirmation height and calculate the short channel id.
5325 /// In the second, we simply return an Err indicating we need to be force-closed now.
5326 pub fn transactions_confirmed<NS: Deref, L: Deref>(
5327 &mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData,
5328 chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L
5329 ) -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5330 where
5331 NS::Target: NodeSigner,
5332 L::Target: Logger
5333 {
5334 let mut msgs = (None, None);
5335 if let Some(funding_txo) = self.context.get_funding_txo() {
5336 for &(index_in_block, tx) in txdata.iter() {
5337 // Check if the transaction is the expected funding transaction, and if it is,
5338 // check that it pays the right amount to the right script.
5339 if self.context.funding_tx_confirmation_height == 0 {
5340 if tx.txid() == funding_txo.txid {
5341 let txo_idx = funding_txo.index as usize;
5342 if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_v0_p2wsh() ||
5343 tx.output[txo_idx].value != self.context.channel_value_satoshis {
5344 if self.context.is_outbound() {
5345 // If we generated the funding transaction and it doesn't match what it
5346 // should, the client is really broken and we should just panic and
5347 // tell them off. That said, because hash collisions happen with high
5348 // probability in fuzzing mode, if we're fuzzing we just close the
5349 // channel and move on.
5350 #[cfg(not(fuzzing))]
5351 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
5352 }
5353 self.context.update_time_counter += 1;
5354 let err_reason = "funding tx had wrong script/value or output index";
5355 return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() });
5356 }
5357 if self.context.is_outbound() {
5358 if !tx.is_coin_base() {
5359 for input in tx.input.iter() {
5360 if input.witness.is_empty() {
5361 // We generated a malleable funding transaction, implying we've
5362 // just exposed ourselves to funds loss to our counterparty.
5363 #[cfg(not(fuzzing))]
5364 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
5365 }
5366 }
5367 }
5368 }
5369 self.context.funding_tx_confirmation_height = height;
5370 self.context.funding_tx_confirmed_in = Some(*block_hash);
5371 self.context.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
5372 Ok(scid) => Some(scid),
5373 Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
5374 };
5375 }
5376 // If this is a coinbase transaction and not a 0-conf channel
5377 // we should update our min_depth to 100 to handle coinbase maturity
5378 if tx.is_coin_base() &&
5379 self.context.minimum_depth.unwrap_or(0) > 0 &&
5380 self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
5381 self.context.minimum_depth = Some(COINBASE_MATURITY);
5382 }
5383 }
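// scid_from_parts packs (height, tx index, output index) into the 8-byte
// short channel id per BOLT 7: block height in the upper 3 bytes, the
// transaction's index within the block in the middle 3, and the funding
// output index in the low 2. E.g., with illustrative values, height 800_000,
// tx index 1_027 and vout 1 yield (800_000 << 40) | (1_027 << 16) | 1.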
5384 // If we allow 1-conf funding, we may need to check for channel_ready here and
5385 // send it immediately instead of waiting for a best_block_updated call (which
5386 // may have already happened for this block).
5387 if let Some(channel_ready) = self.check_get_channel_ready(height) {
5388 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
5389 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger);
5390 msgs = (Some(channel_ready), announcement_sigs);
5391 }
5393 for inp in tx.input.iter() {
5394 if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
5395 log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, &self.context.channel_id());
5396 return Err(ClosureReason::CommitmentTxConfirmed);
5397 }
5398 }
5399 }
5400 }
5401 Ok(msgs)
5402 }
5404 /// When a new block is connected, we check the height of the block against outbound holding
5405 /// cell HTLCs in case we need to give up on them prematurely and time them out. Everything
5406 /// else (e.g. commitment transaction broadcasts, HTLC transaction broadcasting, etc) is
5407 /// handled by the ChannelMonitor.
5409 /// If we return Err, the channel may have been closed, at which point the standard
5410 /// requirements apply - no calls may be made except those explicitly stated to be allowed
5411 /// after shutdown.
5412 ///
5413 /// May return some HTLCs (and their payment_hash) which have timed out and should be failed
5414 /// back.
5415 pub fn best_block_updated<NS: Deref, L: Deref>(
5416 &mut self, height: u32, highest_header_time: u32, chain_hash: ChainHash,
5417 node_signer: &NS, user_config: &UserConfig, logger: &L
5418 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5419 where
5420 NS::Target: NodeSigner,
5421 L::Target: Logger
5422 {
5423 self.do_best_block_updated(height, highest_header_time, Some((chain_hash, node_signer, user_config)), logger)
5424 }
5426 fn do_best_block_updated<NS: Deref, L: Deref>(
5427 &mut self, height: u32, highest_header_time: u32,
5428 chain_node_signer: Option<(ChainHash, &NS, &UserConfig)>, logger: &L
5429 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5430 where
5431 NS::Target: NodeSigner,
5432 L::Target: Logger
5433 {
5434 let mut timed_out_htlcs = Vec::new();
5435 // This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
5436 // forward an HTLC when our counterparty should almost certainly just fail it for expiring
5437 // ~now.
5438 let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
5439 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
5440 match htlc_update {
5441 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
5442 if *cltv_expiry <= unforwarded_htlc_cltv_limit {
5443 timed_out_htlcs.push((source.clone(), payment_hash.clone()));
5444 false
5445 } else { true }
5446 },
5447 _ => true,
5448 }
5449 });
5451 self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);
5453 if let Some(channel_ready) = self.check_get_channel_ready(height) {
5454 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
5455 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
5456 } else { None };
5457 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
5458 return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
5459 }
5461 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
5462 self.context.channel_state.is_our_channel_ready() {
5463 let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
5464 if self.context.funding_tx_confirmation_height == 0 {
5465 // Note that check_get_channel_ready may reset funding_tx_confirmation_height to
5466 // zero if it has been reorged out, however in either case, our state flags
5467 // indicate we've already sent a channel_ready
5468 funding_tx_confirmations = 0;
5469 }
5471 // If we've sent channel_ready (or have both sent and received channel_ready), and
5472 // the funding transaction has become unconfirmed,
5473 // close the channel and hope we can get the latest state on chain (because presumably
5474 // the funding transaction is at least still in the mempool of most nodes).
5476 // Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
5477 // 0-conf channel, but not doing so may lead to the
5478 // `ChannelManager::short_to_chan_info` map being inconsistent, so we currently have
5479 // to.
5480 if funding_tx_confirmations == 0 && self.context.funding_tx_confirmed_in.is_some() {
5481 let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
5482 self.context.minimum_depth.unwrap(), funding_tx_confirmations);
5483 return Err(ClosureReason::ProcessingError { err: err_reason });
5485 } else if !self.context.is_outbound() && self.context.funding_tx_confirmed_in.is_none() &&
5486 height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
5487 log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id);
5488 // If funding_tx_confirmed_in is unset, the channel must not be active
5489 assert!(self.context.channel_state <= ChannelState::ChannelReady(ChannelReadyFlags::new()));
5490 assert!(!self.context.channel_state.is_our_channel_ready());
5491 return Err(ClosureReason::FundingTimedOut);
5492 }
5494 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
5495 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
5496 } else { None };
5497 Ok((None, timed_out_htlcs, announcement_sigs))
5498 }
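// Worked example of the confirmation arithmetic above (illustrative heights):
// a funding transaction confirmed at height 800_000 has
// 800_005 - 800_000 + 1 = 6 confirmations at best height 800_005; if a reorg
// later leaves it with 0 confirmations after we already sent channel_ready,
// the branch above force-closes with a ProcessingError ClosureReason.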
5500 /// Indicates the funding transaction is no longer confirmed in the main chain. This may
5501 /// force-close the channel, but may also indicate a harmless reorganization of a block or two
5502 /// before the channel has reached channel_ready and we can just wait for more blocks.
5503 pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
5504 if self.context.funding_tx_confirmation_height != 0 {
5505 // We handle the funding disconnection by calling best_block_updated with a height one
5506 // below where our funding was connected, implying a reorg back to conf_height - 1.
5507 let reorg_height = self.context.funding_tx_confirmation_height - 1;
5508 // We use the time field to bump the current time we set on channel updates if its
5509 // larger. If we don't know that time has moved forward, we can just set it to the last
5510 // time we saw and it will be ignored.
5511 let best_time = self.context.update_time_counter;
5512 match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&dyn NodeSigner, &UserConfig)>, logger) {
5513 Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
5514 assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?");
5515 assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
5516 assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?");
5517 Ok(())
5518 },
5519 Err(e) => Err(e)
5520 }
5521 } else {
5522 // We never learned about the funding confirmation anyway, just ignore
5523 Ok(())
5524 }
5525 }
5527 // Methods to get unprompted messages to send to the remote end (or where we already returned
5528 // something in the handler for the message that prompted this message):
5530 /// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
5531 /// announceable and available for use (have exchanged [`ChannelReady`] messages in both
5532 /// directions). Should be used for both broadcasted announcements and in response to an
5533 /// AnnouncementSignatures message from the remote peer.
5535 /// Will only fail if we're not in a state where channel_announcement may be sent (including
5536 /// closing).
5537 ///
5538 /// This will only return ChannelError::Ignore upon failure.
5540 /// [`ChannelReady`]: crate::ln::msgs::ChannelReady
5541 fn get_channel_announcement<NS: Deref>(
5542 &self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
5543 ) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5544 if !self.context.config.announced_channel {
5545 return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
5546 }
5547 if !self.context.is_usable() {
5548 return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
5549 }
5551 let short_channel_id = self.context.get_short_channel_id()
5552 .ok_or(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel has not been confirmed yet".to_owned()))?;
5553 let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5554 .map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
5555 let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
5556 let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();
5558 let msg = msgs::UnsignedChannelAnnouncement {
5559 features: channelmanager::provided_channel_features(&user_config),
5560 chain_hash,
5561 short_channel_id,
5562 node_id_1: if were_node_one { node_id } else { counterparty_node_id },
5563 node_id_2: if were_node_one { counterparty_node_id } else { node_id },
5564 bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }),
5565 bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }),
5566 excess_data: Vec::new(),
5567 };
5569 Ok(msg)
5570 }
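// Per BOLT 7, node_id_1 must be the lexicographically-lesser of the two node
// ids, which is exactly what the `were_node_one` comparison above encodes;
// bitcoin_key_1/bitcoin_key_2 must then be listed in the same order as their
// corresponding node ids.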
5572 fn get_announcement_sigs<NS: Deref, L: Deref>(
5573 &mut self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
5574 best_block_height: u32, logger: &L
5575 ) -> Option<msgs::AnnouncementSignatures>
5576 where
5577 NS::Target: NodeSigner,
5578 L::Target: Logger
5579 {
5580 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5581 return None;
5582 }
5584 if !self.context.is_usable() {
5585 return None;
5586 }
5588 if self.context.channel_state.is_peer_disconnected() {
5589 log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
5590 return None;
5591 }
5593 if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent {
5594 return None;
5595 }
5597 log_trace!(logger, "Creating an announcement_signatures message for channel {}", &self.context.channel_id());
5598 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
5599 Ok(a) => a,
5600 Err(e) => {
5601 log_trace!(logger, "{:?}", e);
5602 return None;
5603 }
5604 };
5605 let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) {
5606 Err(_) => {
5607 log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!");
5608 return None;
5609 },
5610 Ok(v) => v
5611 };
5612 match &self.context.holder_signer {
5613 ChannelSignerType::Ecdsa(ecdsa) => {
5614 let our_bitcoin_sig = match ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) {
5615 Err(_) => {
5616 log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!");
5617 return None;
5618 },
5619 Ok(v) => v
5620 };
5621 let short_channel_id = match self.context.get_short_channel_id() {
5622 Some(scid) => scid,
5623 None => return None,
5624 };
5626 self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;
5628 Some(msgs::AnnouncementSignatures {
5629 channel_id: self.context.channel_id(),
5630 short_channel_id,
5631 node_signature: our_node_sig,
5632 bitcoin_signature: our_bitcoin_sig,
5633 })
5634 },
5635 // TODO (taproot|arik)
5636 #[cfg(taproot)]
5637 _ => todo!()
5638 }
5639 }
5641 /// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are
5642 /// available.
5643 fn sign_channel_announcement<NS: Deref>(
5644 &self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement
5645 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5646 if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs {
5647 let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5648 .map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?);
5649 let were_node_one = announcement.node_id_1 == our_node_key;
5651 let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement))
5652 .map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?;
5653 match &self.context.holder_signer {
5654 ChannelSignerType::Ecdsa(ecdsa) => {
5655 let our_bitcoin_sig = ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx)
5656 .map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
5657 Ok(msgs::ChannelAnnouncement {
5658 node_signature_1: if were_node_one { our_node_sig } else { their_node_sig },
5659 node_signature_2: if were_node_one { their_node_sig } else { our_node_sig },
5660 bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig },
5661 bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
5662 contents: announcement,
5663 })
5664 },
5665 // TODO (taproot|arik)
5666 #[cfg(taproot)]
5667 _ => todo!()
5668 }
5669 } else {
5670 Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
5671 }
5672 }
5674 /// Processes an incoming announcement_signatures message, providing a fully-signed
5675 /// channel_announcement message which we can broadcast and storing our counterparty's
5676 /// signatures for later reconstruction/rebroadcast of the channel_announcement.
5677 pub fn announcement_signatures<NS: Deref>(
5678 &mut self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32,
5679 msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
5680 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5681 let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;
5683 let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);
5685 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
5686 return Err(ChannelError::Close(format!(
5687 "Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
5688 &announcement, self.context.get_counterparty_node_id())));
5689 }
5690 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
5691 return Err(ChannelError::Close(format!(
5692 "Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
5693 &announcement, self.context.counterparty_funding_pubkey())));
5694 }
5696 self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature));
5697 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5698 return Err(ChannelError::Ignore(
5699 "Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned()));
5700 }
5702 self.sign_channel_announcement(node_signer, announcement)
5703 }
5705 /// Gets a signed channel_announcement for this channel, if we previously received an
5706 /// announcement_signatures from our counterparty.
5707 pub fn get_signed_channel_announcement<NS: Deref>(
5708 &self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32, user_config: &UserConfig
5709 ) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
5710 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5711 return None;
5712 }
5713 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
5714 Ok(res) => res,
5715 Err(_) => return None,
5716 };
5717 match self.sign_channel_announcement(node_signer, announcement) {
5718 Ok(res) => Some(res),
5719 Err(_) => None,
5720 }
5721 }
5723 /// May panic if called on a channel that wasn't immediately-previously
5724 /// self.remove_uncommitted_htlcs_and_mark_paused()'d
5725 pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
5726 assert!(self.context.channel_state.is_peer_disconnected());
5727 assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
5728 // Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
5729 // current to_remote balances. However, it no longer has any use, and thus is now simply
5730 // set to a dummy (but valid, as required by the spec) public key.
5731 // fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey"
5732 // branches, but we unwrap it below, so we arbitrarily select a dummy pubkey which is both
5733 // valid, and valid in fuzzing mode's arbitrary validity criteria:
5734 let mut pk = [2; 33]; pk[1] = 0xff;
5735 let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
5736 let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
5737 let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
5738 log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), &self.context.channel_id());
5739 remote_last_secret
5740 } else {
5741 log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", &self.context.channel_id());
5742 [0;32]
5743 };
5744 self.mark_awaiting_response();
5745 msgs::ChannelReestablish {
5746 channel_id: self.context.channel_id(),
5747 // The protocol has two different commitment number concepts - the "commitment
5748 // transaction number", which starts from 0 and counts up, and the "revocation key
5749 // index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
5750 // commitment transaction numbers by the index which will be used to reveal the
5751 // revocation key for that commitment transaction, which means we have to convert them
5752 // to protocol-level commitment numbers here...
5754 // next_local_commitment_number is the next commitment_signed number we expect to
5755 // receive (indicating if they need to resend one that we missed).
5756 next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
5757 // We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
5758 // receive, however we track it by the next commitment number for a remote transaction
5759 // (which is one further, as they always revoke previous commitment transaction, not
5760 // the one we send) so we have to decrement by 1. Note that if
5761 // cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
5762 // dropped this channel on disconnect as it hasn't yet reached AwaitingChannelReady so we can't
5763 // overflow here.
5764 next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
5765 your_last_per_commitment_secret: remote_last_secret,
5766 my_current_per_commitment_point: dummy_pubkey,
5767 // TODO(dual_funding): If we've sent `commitment_signed` for an interactive transaction
5768 // construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
5769 // txid of that interactive transaction, else we MUST NOT set it.
5770 next_funding_txid: None,
5771 }
5772 }
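// Worked example of the index conversion above: immediately after the initial
// commitment dance, both cur_*_commitment_transaction_number fields are
// INITIAL_COMMITMENT_NUMBER - 1, so we send next_local_commitment_number = 1
// (we expect the peer's commitment_signed number 1 next) and
// next_remote_commitment_number = 0 (we have not yet received a
// revoke_and_ack from them).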
5775 // Send stuff to our remote peers:
5777 /// Queues up an outbound HTLC to send by placing it in the holding cell. You should call
5778 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
5779 /// commitment update.
5780 ///
5781 /// `Err`s will only be [`ChannelError::Ignore`].
5782 pub fn queue_add_htlc<F: Deref, L: Deref>(
5783 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
5784 onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
5785 blinding_point: Option<PublicKey>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5786 ) -> Result<(), ChannelError>
5787 where F::Target: FeeEstimator, L::Target: Logger
5788 {
5789 self
5790 .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true,
5791 skimmed_fee_msat, blinding_point, fee_estimator, logger)
5792 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
5793 .map_err(|err| {
5794 if let ChannelError::Ignore(_) = err { /* fine */ }
5795 else { debug_assert!(false, "Queueing cannot trigger channel failure"); }
5796 err
5797 })
5798 }
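// Illustrative two-step usage (hypothetical caller, not this file's actual
// call site): queue the HTLC now, then free the holding cell later to emit
// the update_add_htlc and commitment_signed pair:
//
//   channel.queue_add_htlc(amt_msat, payment_hash, cltv_expiry, source,
//       onion_packet, None, None, &fee_estimator, &logger)?;
//   // ... once any pending RAA / monitor update has resolved:
//   channel.maybe_free_holding_cell_htlcs(&fee_estimator, &logger);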
5800 /// Adds a pending outbound HTLC to this channel, note that you probably want
5801 /// [`Self::send_htlc_and_commit`] instead, as you'll likely want both messages at once.
5802 ///
5803 /// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on
5804 /// the wire:
5805 /// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
5806 /// wouldn't be able to determine what they actually ACK'ed if we have two sets of updates
5807 /// in flight.
5808 /// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as
5809 /// we may not yet have sent the previous commitment update messages and will need to
5810 /// regenerate them.
5811 ///
5812 /// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods
5813 /// on this [`Channel`] if `force_holding_cell` is false.
5814 ///
5815 /// `Err`s will only be [`ChannelError::Ignore`].
5816 fn send_htlc<F: Deref, L: Deref>(
5817 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
5818 onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool,
5819 skimmed_fee_msat: Option<u64>, blinding_point: Option<PublicKey>,
5820 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5821 ) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
5822 where F::Target: FeeEstimator, L::Target: Logger
5823 {
5824 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
5825 self.context.channel_state.is_local_shutdown_sent() ||
5826 self.context.channel_state.is_remote_shutdown_sent()
5827 {
5828 return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
5829 }
5830 let channel_total_msat = self.context.channel_value_satoshis * 1000;
5831 if amount_msat > channel_total_msat {
5832 return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat)));
5833 }
5835 if amount_msat == 0 {
5836 return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
5837 }
5839 let available_balances = self.context.get_available_balances(fee_estimator);
5840 if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
5841 return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
5842 available_balances.next_outbound_htlc_minimum_msat)));
5843 }
5845 if amount_msat > available_balances.next_outbound_htlc_limit_msat {
5846 return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
5847 available_balances.next_outbound_htlc_limit_msat)));
5848 }
5850 if self.context.channel_state.is_peer_disconnected() {
5851 // Note that this should never really happen, if we're !is_live() on receipt of an
5852 // incoming HTLC for relay will result in us rejecting the HTLC and we won't allow
5853 // the user to send directly into a !is_live() channel. However, if we
5854 // disconnected during the time the previous hop was doing the commitment dance we may
5855 // end up getting here after the forwarding delay. In any case, returning an
5856 // IgnoreError will get ChannelManager to do the right thing and fail backwards now.
5857 return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
5858 }
5860 let need_holding_cell = self.context.channel_state.should_force_holding_cell();
5861 log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}",
5862 payment_hash, amount_msat,
5863 if force_holding_cell { "into holding cell" }
5864 else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
5865 else { "to peer" });
5867 if need_holding_cell {
5868 force_holding_cell = true;
5869 }
5871 // Now update local state:
5872 if force_holding_cell {
5873 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
5874 amount_msat,
5875 payment_hash,
5876 cltv_expiry,
5877 source,
5878 onion_routing_packet,
5879 skimmed_fee_msat,
5880 blinding_point,
5881 });
5882 return Ok(None);
5883 }
5885 self.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
5886 htlc_id: self.context.next_holder_htlc_id,
5887 amount_msat,
5888 payment_hash: payment_hash.clone(),
5889 cltv_expiry,
5890 state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
5891 source,
5892 blinding_point,
5893 skimmed_fee_msat,
5894 });
5896 let res = msgs::UpdateAddHTLC {
5897 channel_id: self.context.channel_id,
5898 htlc_id: self.context.next_holder_htlc_id,
5899 amount_msat,
5900 payment_hash,
5901 cltv_expiry,
5902 onion_routing_packet,
5903 skimmed_fee_msat,
5904 blinding_point,
5905 };
5906 self.context.next_holder_htlc_id += 1;
5908 Ok(Some(res))
5909 }
5911 fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
5912 log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
5913 // We can upgrade the status of some HTLCs that are waiting on a commitment, even if we
5914 // fail to generate this, we still are at least at a position where upgrading their status
5915 // is acceptable.
5916 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
5917 let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state {
5918 Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
5919 } else { None };
5920 if let Some(state) = new_state {
5921 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
5922 htlc.state = state;
5923 }
5924 }
5925 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
5926 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
5927 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
5928 // Grab the preimage, if it exists, instead of cloning
5929 let mut reason = OutboundHTLCOutcome::Success(None);
5930 mem::swap(outcome, &mut reason);
5931 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
5932 }
5933 }
5934 if let Some((feerate, update_state)) = self.context.pending_update_fee {
5935 if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
5936 debug_assert!(!self.context.is_outbound());
5937 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
5938 self.context.feerate_per_kw = feerate;
5939 self.context.pending_update_fee = None;
5940 }
5941 }
5942 self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst;
5944 let (mut htlcs_ref, counterparty_commitment_tx) =
5945 self.build_commitment_no_state_update(logger);
5946 let counterparty_commitment_txid = counterparty_commitment_tx.trust().txid();
5947 let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
5948 htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();
5950 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent {
5951 self.context.announcement_sigs_state = AnnouncementSigsState::Committed;
5952 }
5954 self.context.latest_monitor_update_id += 1;
5955 let monitor_update = ChannelMonitorUpdate {
5956 update_id: self.context.latest_monitor_update_id,
5957 counterparty_node_id: Some(self.context.counterparty_node_id),
5958 updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
5959 commitment_txid: counterparty_commitment_txid,
5960 htlc_outputs: htlcs.clone(),
5961 commitment_number: self.context.cur_counterparty_commitment_transaction_number,
5962 their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap(),
5963 feerate_per_kw: Some(counterparty_commitment_tx.feerate_per_kw()),
5964 to_broadcaster_value_sat: Some(counterparty_commitment_tx.to_broadcaster_value_sat()),
5965 to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()),
5966 }],
5967 };
5968 self.context.channel_state.set_awaiting_remote_revoke();
5969 monitor_update
5970 }
5972 fn build_commitment_no_state_update<L: Deref>(&self, logger: &L)
5973 -> (Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>, CommitmentTransaction)
5974 where L::Target: Logger
5975 {
5976 let counterparty_keys = self.context.build_remote_transaction_keys();
5977 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
5978 let counterparty_commitment_tx = commitment_stats.tx;
5980 #[cfg(any(test, fuzzing))]
5981 {
5982 if !self.context.is_outbound() {
5983 let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
5984 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
5985 if let Some(info) = projected_commit_tx_info {
5986 let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len();
5987 if info.total_pending_htlcs == total_pending_htlcs
5988 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
5989 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
5990 && info.feerate == self.context.feerate_per_kw {
5991 let actual_fee = commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.get_channel_type());
5992 assert_eq!(actual_fee, info.fee);
5993 }
5994 }
5995 }
5996 }
5998 (commitment_stats.htlcs_included, counterparty_commitment_tx)
5999 }
	/// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
	/// generation when we shouldn't change HTLC/channel state.
	fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
		// Get the fee tests from `build_commitment_no_state_update`
		#[cfg(any(test, fuzzing))]
		self.build_commitment_no_state_update(logger);

		let counterparty_keys = self.context.build_remote_transaction_keys();
		let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
		let counterparty_commitment_txid = commitment_stats.tx.trust().txid();

		match &self.context.holder_signer {
			ChannelSignerType::Ecdsa(ecdsa) => {
				let (signature, htlc_signatures);

				{
					let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
					for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
						htlcs.push(htlc);
					}

					let res = ecdsa.sign_counterparty_commitment(
							&commitment_stats.tx,
							commitment_stats.inbound_htlc_preimages,
							commitment_stats.outbound_htlc_preimages,
							&self.context.secp_ctx,
						).map_err(|_| ChannelError::Ignore("Failed to get signatures for new commitment_signed".to_owned()))?;
					signature = res.0;
					htlc_signatures = res.1;

					log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
						encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
						&counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
						log_bytes!(signature.serialize_compact()[..]), &self.context.channel_id());

					for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
						log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
							encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, &self.context.channel_type, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
							encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &counterparty_keys)),
							log_bytes!(counterparty_keys.broadcaster_htlc_key.to_public_key().serialize()),
							log_bytes!(htlc_sig.serialize_compact()[..]), &self.context.channel_id());
					}
				}

				Ok((msgs::CommitmentSigned {
					channel_id: self.context.channel_id,
					signature,
					htlc_signatures,
					#[cfg(taproot)]
					partial_signature_with_nonce: None,
				}, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
			},
			// TODO (taproot|arik)
			#[cfg(taproot)]
			_ => todo!()
		}
	}
	/// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
	/// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
	///
	/// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
	/// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
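	///
	/// An illustrative sketch of the call shape (not a runnable doctest; the channel, fee
	/// estimator, logger, and HTLC parameters are assumed to already exist in scope):
	///
	/// ```ignore
	/// // Queue the HTLC and, if it could be added, immediately build the counterparty
	/// // commitment update for it.
	/// let monitor_update_opt = channel.send_htlc_and_commit(
	/// 	amount_msat, payment_hash, cltv_expiry, htlc_source, onion_routing_packet,
	/// 	None, // no skimmed fee
	/// 	&fee_estimator, &logger,
	/// )?;
	/// ```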
	pub fn send_htlc_and_commit<F: Deref, L: Deref>(
		&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32,
		source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
		fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
	) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
	where F::Target: FeeEstimator, L::Target: Logger
	{
		let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
			onion_routing_packet, false, skimmed_fee_msat, None, fee_estimator, logger);
		if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
		match send_res? {
			Some(_) => {
				let monitor_update = self.build_commitment_no_status_check(logger);
				self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
				Ok(self.push_ret_blockable_mon_update(monitor_update))
			},
			None => Ok(None)
		}
	}
	/// Applies the `ChannelUpdate` and returns a boolean indicating whether a change actually
	/// happened.
	pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<bool, ChannelError> {
		let new_forwarding_info = Some(CounterpartyForwardingInfo {
			fee_base_msat: msg.contents.fee_base_msat,
			fee_proportional_millionths: msg.contents.fee_proportional_millionths,
			cltv_expiry_delta: msg.contents.cltv_expiry_delta
		});
		let did_change = self.context.counterparty_forwarding_info != new_forwarding_info;
		if did_change {
			self.context.counterparty_forwarding_info = new_forwarding_info;
		}

		Ok(did_change)
	}
	/// Begins the shutdown process, getting a message for the remote peer and returning all
	/// holding cell HTLCs for payment failure.
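	///
	/// A hedged usage sketch (not a runnable doctest; `channel`, `signer_provider`, and
	/// `their_features` are assumed to be in scope):
	///
	/// ```ignore
	/// let (shutdown_msg, monitor_update_opt, dropped_htlcs) =
	/// 	channel.get_shutdown(&signer_provider, &their_features, None, None)?;
	/// // Send `shutdown_msg` to the peer and fail `dropped_htlcs` backwards.
	/// ```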
	pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures,
		target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
	-> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
	{
		for htlc in self.context.pending_outbound_htlcs.iter() {
			if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
				return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
			}
		}
		if self.context.channel_state.is_local_shutdown_sent() {
			return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
		}
		else if self.context.channel_state.is_remote_shutdown_sent() {
			return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
		}
		if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
			return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
		}
		assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
		if self.context.channel_state.is_peer_disconnected() || self.context.channel_state.is_monitor_update_in_progress() {
			return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
		}

		let update_shutdown_script = match self.context.shutdown_scriptpubkey {
			Some(_) => false,
			None => {
				// use the override shutdown script if provided
				let shutdown_scriptpubkey = match override_shutdown_script {
					Some(script) => script,
					None => {
						// otherwise, use the shutdown scriptpubkey provided by the signer
						match signer_provider.get_shutdown_scriptpubkey() {
							Ok(scriptpubkey) => scriptpubkey,
							Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
						}
					},
				};
				if !shutdown_scriptpubkey.is_compatible(their_features) {
					return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
				}
				self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
				true
			},
		};

		// From here on out, we may not fail!
		self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
		self.context.channel_state.set_local_shutdown_sent();
		self.context.update_time_counter += 1;

		let monitor_update = if update_shutdown_script {
			self.context.latest_monitor_update_id += 1;
			let monitor_update = ChannelMonitorUpdate {
				update_id: self.context.latest_monitor_update_id,
				counterparty_node_id: Some(self.context.counterparty_node_id),
				updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
					scriptpubkey: self.get_closing_scriptpubkey(),
				}],
			};
			self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
			self.push_ret_blockable_mon_update(monitor_update)
		} else { None };
		let shutdown = msgs::Shutdown {
			channel_id: self.context.channel_id,
			scriptpubkey: self.get_closing_scriptpubkey(),
		};

		// Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
		// our shutdown until we've committed all of the pending changes.
		self.context.holding_cell_update_fee = None;
		let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
		self.context.holding_cell_htlc_updates.retain(|htlc_update| {
			match htlc_update {
				&HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
					dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
					false
				},
				_ => true
			}
		});

		debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
			"we can't both complete shutdown and return a monitor update");

		Ok((shutdown, monitor_update, dropped_outbound_htlcs))
	}
	pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
		self.context.holding_cell_htlc_updates.iter()
			.flat_map(|htlc_update| {
				match htlc_update {
					HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
						=> Some((source, payment_hash)),
					_ => None,
				}
			})
			.chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
	}
}
/// A not-yet-funded outbound (from holder) channel using V1 channel establishment.
pub(super) struct OutboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
	pub context: ChannelContext<SP>,
	pub unfunded_context: UnfundedChannelContext,
}
impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
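	/// A hedged sketch of opening a new outbound channel (not a runnable doctest; the
	/// `fee_estimator`, `entropy_source`, `signer_provider`, `config`, and peer parameters are
	/// assumed to be in scope):
	///
	/// ```ignore
	/// let mut chan = OutboundV1Channel::new(&fee_estimator, &entropy_source, &signer_provider,
	/// 	counterparty_node_id, &their_init_features, 1_000_000 /* sats */, 0 /* push_msat */,
	/// 	user_channel_id, &config, best_block_height, rand_outbound_scid_alias, None)?;
	/// let open_channel_msg = chan.get_open_channel(genesis_chain_hash);
	/// ```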
	pub fn new<ES: Deref, F: Deref>(
		fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
		channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
		outbound_scid_alias: u64, temporary_channel_id: Option<ChannelId>
	) -> Result<OutboundV1Channel<SP>, APIError>
	where ES::Target: EntropySource,
		F::Target: FeeEstimator
	{
		let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
		let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
		let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
		let pubkeys = holder_signer.pubkeys().clone();

		if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
			return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
		}
		if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
			return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
		}
		let channel_value_msat = channel_value_satoshis * 1000;
		if push_msat > channel_value_msat {
			return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
		}
		if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
			return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk", holder_selected_contest_delay)});
		}
		let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
		if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
			// Protocol-level safety check in place, although it should never happen because
			// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
			return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below implementation limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
		}

		let channel_type = Self::get_initial_channel_type(&config, their_features);
		debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));

		let (commitment_conf_target, anchor_outputs_value_msat) = if channel_type.supports_anchors_zero_fee_htlc_tx() {
			(ConfirmationTarget::AnchorChannelFee, ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000)
		} else {
			(ConfirmationTarget::NonAnchorChannelFee, 0)
		};
		let commitment_feerate = fee_estimator.bounded_sat_per_1000_weight(commitment_conf_target);

		let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
		let commitment_tx_fee = commit_tx_fee_msat(commitment_feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
		if value_to_self_msat.saturating_sub(anchor_outputs_value_msat) < commitment_tx_fee {
			return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
		}
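		// As a rough worked example of the check above (figures assume the BOLT-3 non-anchor
		// commitment weights of 724 weight base plus 172 weight per HTLC, and a
		// MIN_AFFORDABLE_HTLC_COUNT of 4): at a feerate of 2500 sat/kW the initial commitment
		// transaction costs (724 + 4 * 172) * 2500 / 1000 = 3530 sats, so an outbound channel
		// with no push_msat must be funded with comfortably more than 3530 sats (plus the two
		// anchor outputs' value, if anchors are negotiated) for this check to pass.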
		let mut secp_ctx = Secp256k1::new();
		secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());

		let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
			match signer_provider.get_shutdown_scriptpubkey() {
				Ok(scriptpubkey) => Some(scriptpubkey),
				Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
			}
		} else { None };

		if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
			if !shutdown_scriptpubkey.is_compatible(&their_features) {
				return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
			}
		}

		let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
			Ok(script) => script,
			Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
		};

		let temporary_channel_id = temporary_channel_id.unwrap_or_else(|| ChannelId::temporary_from_entropy_source(entropy_source));
		Ok(Self {
			context: ChannelContext {
				config: LegacyChannelConfig {
					options: config.channel_config.clone(),
					announced_channel: config.channel_handshake_config.announced_channel,
					commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
				},

				inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),

				channel_id: temporary_channel_id,
				temporary_channel_id: Some(temporary_channel_id),
				channel_state: ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT),
				announcement_sigs_state: AnnouncementSigsState::NotSent,

				channel_value_satoshis,

				latest_monitor_update_id: 0,

				holder_signer: ChannelSignerType::Ecdsa(holder_signer),
				shutdown_scriptpubkey,

				cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
				cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,

				pending_inbound_htlcs: Vec::new(),
				pending_outbound_htlcs: Vec::new(),
				holding_cell_htlc_updates: Vec::new(),
				pending_update_fee: None,
				holding_cell_update_fee: None,
				next_holder_htlc_id: 0,
				next_counterparty_htlc_id: 0,
				update_time_counter: 1,

				resend_order: RAACommitmentOrder::CommitmentFirst,

				monitor_pending_channel_ready: false,
				monitor_pending_revoke_and_ack: false,
				monitor_pending_commitment_signed: false,
				monitor_pending_forwards: Vec::new(),
				monitor_pending_failures: Vec::new(),
				monitor_pending_finalized_fulfills: Vec::new(),

				signer_pending_commitment_update: false,
				signer_pending_funding: false,

				#[cfg(debug_assertions)]
				holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
				#[cfg(debug_assertions)]
				counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),

				last_sent_closing_fee: None,
				pending_counterparty_closing_signed: None,
				expecting_peer_commitment_signed: false,
				closing_fee_limits: None,
				target_closing_feerate_sats_per_kw: None,

				funding_tx_confirmed_in: None,
				funding_tx_confirmation_height: 0,
				short_channel_id: None,
				channel_creation_height: current_chain_height,

				feerate_per_kw: commitment_feerate,
				counterparty_dust_limit_satoshis: 0,
				holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
				counterparty_max_htlc_value_in_flight_msat: 0,
				holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
				counterparty_selected_channel_reserve_satoshis: None, // Filled in upon receiving accept_channel
				holder_selected_channel_reserve_satoshis,
				counterparty_htlc_minimum_msat: 0,
				holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
				counterparty_max_accepted_htlcs: 0,
				holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
				minimum_depth: None, // Filled in upon receiving accept_channel

				counterparty_forwarding_info: None,

				channel_transaction_parameters: ChannelTransactionParameters {
					holder_pubkeys: pubkeys,
					holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
					is_outbound_from_holder: true,
					counterparty_parameters: None,
					funding_outpoint: None,
					channel_type_features: channel_type.clone()
				},
				funding_transaction: None,
				is_batch_funding: None,

				counterparty_cur_commitment_point: None,
				counterparty_prev_commitment_point: None,
				counterparty_node_id,

				counterparty_shutdown_scriptpubkey: None,

				commitment_secrets: CounterpartyCommitmentSecrets::new(),

				channel_update_status: ChannelUpdateStatus::Enabled,
				closing_signed_in_flight: false,

				announcement_sigs: None,

				#[cfg(any(test, fuzzing))]
				next_local_commitment_tx_fee_info_cached: Mutex::new(None),
				#[cfg(any(test, fuzzing))]
				next_remote_commitment_tx_fee_info_cached: Mutex::new(None),

				workaround_lnd_bug_4006: None,
				sent_message_awaiting_response: None,

				latest_inbound_scid_alias: None,
				outbound_scid_alias,

				channel_pending_event_emitted: false,
				channel_ready_event_emitted: false,

				#[cfg(any(test, fuzzing))]
				historical_inbound_htlc_fulfills: HashSet::new(),

				blocked_monitor_updates: Vec::new(),
			},
			unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
		})
	}
	/// Only allowed after [`ChannelContext::channel_transaction_parameters`] is set.
	fn get_funding_created_msg<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
		let counterparty_keys = self.context.build_remote_transaction_keys();
		let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
		let signature = match &self.context.holder_signer {
			// TODO (taproot|arik): move match into calling method for Taproot
			ChannelSignerType::Ecdsa(ecdsa) => {
				ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.context.secp_ctx)
					.map(|(sig, _)| sig).ok()?
			},
			// TODO (taproot|arik)
			#[cfg(taproot)]
			_ => todo!()
		};

		if self.context.signer_pending_funding {
			log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding");
			self.context.signer_pending_funding = false;
		}

		Some(msgs::FundingCreated {
			temporary_channel_id: self.context.temporary_channel_id.unwrap(),
			funding_txid: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().txid,
			funding_output_index: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().index,
			signature,
			#[cfg(taproot)]
			partial_signature_with_nonce: None,
			#[cfg(taproot)]
			next_local_nonce: None,
		})
	}
	/// Updates channel state with knowledge of the funding transaction's txid/index, and generates
	/// a funding_created message for the remote peer.
	///
	/// Panics if called at some time other than immediately after initial handshake, if called twice,
	/// or if called on an inbound channel. Note that channel_id changes during this call!
	///
	/// Do NOT broadcast the funding transaction until after a successful funding_signed call!
	/// If an Err is returned, it is a ChannelError::Close.
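	///
	/// A hedged sketch of the funding flow (not a runnable doctest; the funding transaction,
	/// output index, and logger are assumed to be in scope):
	///
	/// ```ignore
	/// let funding_txo = OutPoint { txid: funding_tx.txid(), index: output_index };
	/// let funding_created_opt = chan.get_funding_created(funding_tx, funding_txo,
	/// 	false /* is_batch_funding */, &logger).map_err(|(_chan, e)| e)?;
	/// // Only broadcast the funding transaction once `funding_signed` has been handled.
	/// ```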
	pub fn get_funding_created<L: Deref>(&mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
	-> Result<Option<msgs::FundingCreated>, (Self, ChannelError)> where L::Target: Logger {
		if !self.context.is_outbound() {
			panic!("Tried to create outbound funding_created message on an inbound channel!");
		}
		if !matches!(
			self.context.channel_state, ChannelState::NegotiatingFunding(flags)
			if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
		) {
			panic!("Tried to get a funding_created message at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
		}
		if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
				self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
				self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
			panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
		}

		self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
		self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);

		// Now that we're past error-generating stuff, update our local state:
		self.context.channel_state = ChannelState::FundingNegotiated;
		self.context.channel_id = funding_txo.to_channel_id();

		// If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
		// We can skip this if it is a zero-conf channel.
		if funding_transaction.is_coin_base() &&
				self.context.minimum_depth.unwrap_or(0) > 0 &&
				self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
			self.context.minimum_depth = Some(COINBASE_MATURITY);
		}

		self.context.funding_transaction = Some(funding_transaction);
		self.context.is_batch_funding = Some(()).filter(|_| is_batch_funding);

		let funding_created = self.get_funding_created_msg(logger);
		if funding_created.is_none() {
			#[cfg(not(async_signing))] {
				panic!("Failed to get signature for new funding creation");
			}
			#[cfg(async_signing)] {
				if !self.context.signer_pending_funding {
					log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
					self.context.signer_pending_funding = true;
				}
			}
		}

		Ok(funding_created)
	}
	fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
		// The default channel type (ie the first one we try) depends on whether the channel is
		// public - if it is, we just go with `only_static_remotekey` as it's the only option
		// available. If it's private, we first try `scid_privacy` as it provides better privacy
		// with no other changes, and fall back to `only_static_remotekey`.
		let mut ret = ChannelTypeFeatures::only_static_remote_key();
		if !config.channel_handshake_config.announced_channel &&
			config.channel_handshake_config.negotiate_scid_privacy &&
			their_features.supports_scid_privacy() {
			ret.set_scid_privacy_required();
		}

		// Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
		// set it now. If they don't understand it, we'll fall back to our default of
		// `only_static_remotekey`.
		if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
			their_features.supports_anchors_zero_fee_htlc_tx() {
			ret.set_anchors_zero_fee_htlc_tx_required();
		}

		ret
	}
	/// If we receive an error message, it may only be a rejection of the channel type we tried,
	/// not of our ability to open any channel at all. Thus, on error, we should first call this
	/// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
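	///
	/// A hedged sketch of the retry loop this enables (not a runnable doctest):
	///
	/// ```ignore
	/// // On receiving a peer `error` for an as-yet-unfunded outbound channel:
	/// match chan.maybe_handle_error_without_close(chain_hash, &fee_estimator) {
	/// 	Ok(open_channel_msg) => { /* re-send with the downgraded channel type */ },
	/// 	Err(()) => { /* no fallback type left, fail the channel */ },
	/// }
	/// ```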
	pub(crate) fn maybe_handle_error_without_close<F: Deref>(
		&mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F>
	) -> Result<msgs::OpenChannel, ()>
	where
		F::Target: FeeEstimator
	{
		if !self.context.is_outbound() ||
			!matches!(
				self.context.channel_state, ChannelState::NegotiatingFunding(flags)
				if flags == NegotiatingFundingFlags::OUR_INIT_SENT
			)
		{
			return Err(());
		}
		if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() {
			// We've exhausted our options
			return Err(());
		}
		// We support opening a few different types of channels. Try removing our additional
		// features one by one until we've either arrived at our default or the counterparty has
		// accepted one.
		//
		// Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
		// counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
		// checks whether the counterparty supports every feature, this would only happen if the
		// counterparty is advertising the feature, but rejecting channels proposing the feature for
		// some reason.
		if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
			self.context.channel_type.clear_anchors_zero_fee_htlc_tx();
			self.context.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
			assert!(!self.context.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
		} else if self.context.channel_type.supports_scid_privacy() {
			self.context.channel_type.clear_scid_privacy();
		} else {
			self.context.channel_type = ChannelTypeFeatures::only_static_remote_key();
		}
		self.context.channel_transaction_parameters.channel_type_features = self.context.channel_type.clone();
		Ok(self.get_open_channel(chain_hash))
	}
	pub fn get_open_channel(&self, chain_hash: ChainHash) -> msgs::OpenChannel {
		if !self.context.is_outbound() {
			panic!("Tried to open a channel for an inbound channel?");
		}
		if self.context.have_received_message() {
			panic!("Cannot generate an open_channel after we've moved forward");
		}

		if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
			panic!("Tried to send an open_channel for a channel that has already advanced");
		}

		let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
		let keys = self.context.get_holder_pubkeys();

		msgs::OpenChannel {
			chain_hash,
			temporary_channel_id: self.context.channel_id,
			funding_satoshis: self.context.channel_value_satoshis,
			push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat,
			dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
			max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
			channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
			htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
			feerate_per_kw: self.context.feerate_per_kw as u32,
			to_self_delay: self.context.get_holder_selected_contest_delay(),
			max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
			funding_pubkey: keys.funding_pubkey,
			revocation_basepoint: keys.revocation_basepoint.to_public_key(),
			payment_point: keys.payment_point,
			delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
			htlc_basepoint: keys.htlc_basepoint.to_public_key(),
			first_per_commitment_point,
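			// Per BOLT 2, bit 0 of channel_flags is announce_channel; we set it iff the channel
			// is to be publicly announced.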
			channel_flags: if self.context.config.announced_channel {1} else {0},
			shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
				Some(script) => script.clone().into_inner(),
				None => Builder::new().into_script(),
			}),
			channel_type: Some(self.context.channel_type.clone()),
		}
	}
	pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
		let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };

		// Check sanity of message fields:
		if !self.context.is_outbound() {
			return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
		}
		if !matches!(self.context.channel_state, ChannelState::NegotiatingFunding(flags) if flags == NegotiatingFundingFlags::OUR_INIT_SENT) {
			return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
		}
		if msg.dust_limit_satoshis > 21000000 * 100000000 {
			return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.dust_limit_satoshis)));
		}
		if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
			return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
		}
		if msg.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
			return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
		}
		if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
			return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
				msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
		}
		let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
		if msg.htlc_minimum_msat >= full_channel_value_msat {
			return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
		}
		let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
		if msg.to_self_delay > max_delay_acceptable {
			return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.to_self_delay)));
		}
		if msg.max_accepted_htlcs < 1 {
			return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
		}
		if msg.max_accepted_htlcs > MAX_HTLCS {
			return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
		}

		// Now check against optional parameters as set by config...
		if msg.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
			return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
		}
		if msg.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
			return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
		}
		if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
			return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
		}
		if msg.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
			return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
		}
		if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
			return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
		}
		if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
			return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
		}
		if msg.minimum_depth > peer_limits.max_minimum_depth {
			return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.minimum_depth)));
		}

		if let Some(ty) = &msg.channel_type {
			if *ty != self.context.channel_type {
				return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
			}
		} else if their_features.supports_channel_type() {
			// Assume they've accepted the channel type as they said they understand it.
		} else {
			let channel_type = ChannelTypeFeatures::from_init(&their_features);
			if channel_type != ChannelTypeFeatures::only_static_remote_key() {
				return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
			}
			self.context.channel_type = channel_type.clone();
			self.context.channel_transaction_parameters.channel_type_features = channel_type;
		}

		let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
			match &msg.shutdown_scriptpubkey {
				&Some(ref script) => {
					// Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
					if script.len() == 0 {
						None
					} else {
						if !script::is_bolt2_compliant(&script, their_features) {
							return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
						}
						Some(script.clone())
					}
				},
				// Peer is signaling upfront shutdown but didn't opt out with the correct mechanism (i.e. a 0-length script). The peer looks buggy, so we fail the channel
				&None => {
					return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
				}
			}
		} else { None };

		self.context.counterparty_dust_limit_satoshis = msg.dust_limit_satoshis;
		self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
		self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
		self.context.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat;
		self.context.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs;

		if peer_limits.trust_own_funding_0conf {
			self.context.minimum_depth = Some(msg.minimum_depth);
		} else {
			self.context.minimum_depth = Some(cmp::max(1, msg.minimum_depth));
		}

		let counterparty_pubkeys = ChannelPublicKeys {
			funding_pubkey: msg.funding_pubkey,
			revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
			payment_point: msg.payment_point,
			delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
			htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
		};

		self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
			selected_contest_delay: msg.to_self_delay,
			pubkeys: counterparty_pubkeys,
		});

		self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point);
		self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;

		self.context.channel_state = ChannelState::NegotiatingFunding(
			NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
		);
		self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.

		Ok(())
	}
	/// Handles a funding_signed message from the remote end.
	/// If this call is successful, broadcast the funding transaction (and not before!)
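	///
	/// A hedged usage sketch (not a runnable doctest; the message, block, and provider values
	/// are assumed to be in scope):
	///
	/// ```ignore
	/// let (channel, channel_monitor) = outbound_chan
	/// 	.funding_signed(&funding_signed_msg, best_block, &signer_provider, &logger)
	/// 	.map_err(|(_chan, e)| e)?;
	/// // Persist `channel_monitor` first, then broadcast the funding transaction.
	/// ```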
	pub fn funding_signed<L: Deref>(
		mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
	) -> Result<(Channel<SP>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (OutboundV1Channel<SP>, ChannelError)>
	where
		L::Target: Logger
	{
		if !self.context.is_outbound() {
			return Err((self, ChannelError::Close("Received funding_signed for an inbound channel?".to_owned())));
		}
		if !matches!(self.context.channel_state, ChannelState::FundingNegotiated) {
			return Err((self, ChannelError::Close("Received funding_signed in strange state!".to_owned())));
		}
		if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
				self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
				self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
			panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
		}

		let funding_script = self.context.get_funding_redeemscript();

		let counterparty_keys = self.context.build_remote_transaction_keys();
		let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
		let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
		let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();

		log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
			&self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));

		let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
		let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
		{
			let trusted_tx = initial_commitment_tx.trust();
			let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
			let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
			// They sign our commitment transaction, allowing us to broadcast the tx if we wish.
			if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
				return Err((self, ChannelError::Close("Invalid funding_signed signature from peer".to_owned())));
			}
		}

		let holder_commitment_tx = HolderCommitmentTransaction::new(
			initial_commitment_tx,
			msg.signature,
			Vec::new(),
			&self.context.get_holder_pubkeys().funding_pubkey,
			self.context.counterparty_funding_pubkey()
		);

		let validated =
			self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new());
		if validated.is_err() {
			return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
		}

		let funding_redeemscript = self.context.get_funding_redeemscript();
		let funding_txo = self.context.get_funding_txo().unwrap();
		let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
		let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
		let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
		let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
		monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
		let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
			shutdown_script, self.context.get_holder_selected_contest_delay(),
			&self.context.destination_script, (funding_txo, funding_txo_script),
			&self.context.channel_transaction_parameters,
			funding_redeemscript.clone(), self.context.channel_value_satoshis,
			obscure_factor,
			holder_commitment_tx, best_block, self.context.counterparty_node_id);
		channel_monitor.provide_initial_counterparty_commitment_tx(
			counterparty_initial_bitcoin_tx.txid, Vec::new(),
			self.context.cur_counterparty_commitment_transaction_number,
			self.context.counterparty_cur_commitment_point.unwrap(),
			counterparty_initial_commitment_tx.feerate_per_kw(),
			counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
			counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);

		assert!(!self.context.channel_state.is_monitor_update_in_progress()); // We have not had any monitors yet to fail an update!
		if self.context.is_batch_funding() {
			self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH);
		} else {
			self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
		}
		self.context.cur_holder_commitment_transaction_number -= 1;
		self.context.cur_counterparty_commitment_transaction_number -= 1;

		log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());

		let mut channel = Channel { context: self.context };

		let need_channel_ready = channel.check_get_channel_ready(0).is_some();
		channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
		Ok((channel, channel_monitor))
	}
	/// Indicates that the signer may have some signatures for us, so we should retry if we're
	/// blocked.
	#[cfg(async_signing)]
	pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
		if self.context.signer_pending_funding && self.context.is_outbound() {
			log_trace!(logger, "Signer unblocked a funding_created");
			self.get_funding_created_msg(logger)
		} else { None }
	}
}
/// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
pub(super) struct InboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
	pub context: ChannelContext<SP>,
	pub unfunded_context: UnfundedChannelContext,
}
/// Fetches the [`ChannelTypeFeatures`] that will be used for a channel built from a given
/// [`msgs::OpenChannel`].
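///
/// A hedged usage sketch (not a runnable doctest; the message, features, and config are assumed
/// to be in scope):
///
/// ```ignore
/// let channel_type = channel_type_from_open_channel(
/// 	&open_channel_msg, &their_init_features,
/// 	&channelmanager::provided_channel_type_features(&config))?;
/// ```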
pub(super) fn channel_type_from_open_channel(
	msg: &msgs::OpenChannel, their_features: &InitFeatures,
	our_supported_features: &ChannelTypeFeatures
) -> Result<ChannelTypeFeatures, ChannelError> {
	if let Some(channel_type) = &msg.channel_type {
		if channel_type.supports_any_optional_bits() {
			return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
		}

		// We only support the channel types defined by the `ChannelManager` in
		// `provided_channel_type_features`. The channel type must always support
		// `static_remote_key`.
		if !channel_type.requires_static_remote_key() {
			return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
		}
		// Make sure we support all of the features behind the channel type.
		if !channel_type.is_subset(our_supported_features) {
			return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
		}
		let announced_channel = (msg.channel_flags & 1) == 1;
		if channel_type.requires_scid_privacy() && announced_channel {
			return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
		}
		Ok(channel_type.clone())
	} else {
		let channel_type = ChannelTypeFeatures::from_init(&their_features);
		if channel_type != ChannelTypeFeatures::only_static_remote_key() {
			return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
		}
		Ok(channel_type)
	}
}
impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
	/// Creates a new channel from a remote side's request for one.
	/// Assumes chain_hash has already been checked and corresponds with what we expect!
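	///
	/// A hedged construction sketch (not a runnable doctest; all inputs are assumed to be in
	/// scope):
	///
	/// ```ignore
	/// let mut inbound_chan = InboundV1Channel::new(&fee_estimator, &entropy_source,
	/// 	&signer_provider, counterparty_node_id, &our_supported_features, &their_init_features,
	/// 	&open_channel_msg, user_channel_id, &config, best_block_height, &logger,
	/// 	false /* is_0conf */)?;
	/// let accept_msg = inbound_chan.accept_inbound_channel();
	/// ```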
	pub fn new<ES: Deref, F: Deref, L: Deref>(
		fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
		counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
		their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
		current_chain_height: u32, logger: &L, is_0conf: bool,
	) -> Result<InboundV1Channel<SP>, ChannelError>
		where ES::Target: EntropySource,
			F::Target: FeeEstimator,
			L::Target: Logger,
	{
		let logger = WithContext::from(logger, Some(counterparty_node_id), Some(msg.temporary_channel_id));
		let announced_channel = (msg.channel_flags & 1) == 1;

		// First check the channel type is known, failing before we do anything else if we don't
		// support this channel type.
		let channel_type = channel_type_from_open_channel(msg, their_features, our_supported_features)?;

		let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id);
		let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id);
		let pubkeys = holder_signer.pubkeys().clone();
		let counterparty_pubkeys = ChannelPublicKeys {
			funding_pubkey: msg.funding_pubkey,
			revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
			payment_point: msg.payment_point,
			delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
			htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
		};

		if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
			return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
		}

		// Check sanity of message fields:
		if msg.funding_satoshis > config.channel_handshake_limits.max_funding_satoshis {
			return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.channel_handshake_limits.max_funding_satoshis, msg.funding_satoshis)));
		}
		if msg.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
			return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.funding_satoshis)));
		}
		if msg.channel_reserve_satoshis > msg.funding_satoshis {
			return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.funding_satoshis)));
		}
		let full_channel_value_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000;
		if msg.push_msat > full_channel_value_msat {
			return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg.push_msat, full_channel_value_msat)));
		}
		if msg.dust_limit_satoshis > msg.funding_satoshis {
			return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than funding_satoshis {}. Peer never wants payout outputs?", msg.dust_limit_satoshis, msg.funding_satoshis)));
		}
		if msg.htlc_minimum_msat >= full_channel_value_msat {
			return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
		}
		Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, msg.feerate_per_kw, None, &&logger)?;

		let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
		if msg.to_self_delay > max_counterparty_selected_contest_delay {
			return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, msg.to_self_delay)));
		}
		if msg.max_accepted_htlcs < 1 {
			return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
		}
		if msg.max_accepted_htlcs > MAX_HTLCS {
			return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
		}

		// Now check against optional parameters as set by config...
		if msg.funding_satoshis < config.channel_handshake_limits.min_funding_satoshis {
			return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", msg.funding_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
		}
		if msg.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
			return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
		}
		if msg.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
			return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
		}
		if msg.channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
			return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
		}
		if msg.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
			return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
		}
		if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
			return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
		}
		if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
			return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
		}

		// Convert things into internal flags and prep our state:

		if config.channel_handshake_limits.force_announced_channel_preference {
			if config.channel_handshake_config.announced_channel != announced_channel {
				return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
			}
		}

		let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis, config);
		if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
			// Protocol-level safety check in place, although it should never happen because
			// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
			return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
		}
		if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
			return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg.push_msat)));
		}
		if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
			log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
				msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
		}
		if holder_selected_channel_reserve_satoshis < msg.dust_limit_satoshis {
			return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
		}

		// Check if the funder's amount for the initial commitment tx is sufficient
		// for full fee payment plus a few HTLCs to ensure the channel will be useful.
		let anchor_outputs_value = if channel_type.supports_anchors_zero_fee_htlc_tx() {
			ANCHOR_OUTPUT_VALUE_SATOSHI * 2
		} else {
			0
		};
		let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat;
		let commitment_tx_fee = commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000;
		if (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value) < commitment_tx_fee {
			return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee)));
		}

		let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee - anchor_outputs_value;
		// While it's reasonable for us to not meet the channel reserve initially (if they don't
		// want to push much to us), our counterparty should always have more than our reserve.
		if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
			return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
		}

		let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
			match &msg.shutdown_scriptpubkey {
				&Some(ref script) => {
					// Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
					if script.len() == 0 {
						None
					} else {
						if !script::is_bolt2_compliant(&script, their_features) {
							return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
						}
						Some(script.clone())
					}
				},
				// Peer is signaling upfront shutdown but didn't opt out with the correct mechanism (i.e. a 0-length script). The peer looks buggy, so we fail the channel
				&None => {
					return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
				}
			}
		} else { None };

		let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
			match signer_provider.get_shutdown_scriptpubkey() {
				Ok(scriptpubkey) => Some(scriptpubkey),
				Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
			}
		} else { None };

		if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
			if !shutdown_scriptpubkey.is_compatible(&their_features) {
				return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
			}
		}

		let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
			Ok(script) => script,
			Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
		};

		let mut secp_ctx = Secp256k1::new();
		secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());

		let minimum_depth = if is_0conf {
			Some(0)
		} else {
			Some(cmp::max(config.channel_handshake_config.minimum_depth, 1))
		};
		Ok(Self {
			context: ChannelContext {
				config: LegacyChannelConfig {
					options: config.channel_config.clone(),
					commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
				},

				inbound_handshake_limits_override: None,

				temporary_channel_id: Some(msg.temporary_channel_id),
				channel_id: msg.temporary_channel_id,
				channel_state: ChannelState::NegotiatingFunding(
					NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
				),
				announcement_sigs_state: AnnouncementSigsState::NotSent,

				latest_monitor_update_id: 0,

				holder_signer: ChannelSignerType::Ecdsa(holder_signer),
				shutdown_scriptpubkey,

				cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
				cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
				value_to_self_msat: msg.push_msat,

				pending_inbound_htlcs: Vec::new(),
				pending_outbound_htlcs: Vec::new(),
				holding_cell_htlc_updates: Vec::new(),
				pending_update_fee: None,
				holding_cell_update_fee: None,
				next_holder_htlc_id: 0,
				next_counterparty_htlc_id: 0,
				update_time_counter: 1,

				resend_order: RAACommitmentOrder::CommitmentFirst,

				monitor_pending_channel_ready: false,
				monitor_pending_revoke_and_ack: false,
				monitor_pending_commitment_signed: false,
				monitor_pending_forwards: Vec::new(),
				monitor_pending_failures: Vec::new(),
				monitor_pending_finalized_fulfills: Vec::new(),

				signer_pending_commitment_update: false,
				signer_pending_funding: false,

				#[cfg(debug_assertions)]
				holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
				#[cfg(debug_assertions)]
				counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),

				last_sent_closing_fee: None,
				pending_counterparty_closing_signed: None,
				expecting_peer_commitment_signed: false,
				closing_fee_limits: None,
				target_closing_feerate_sats_per_kw: None,

				funding_tx_confirmed_in: None,
				funding_tx_confirmation_height: 0,
				short_channel_id: None,
				channel_creation_height: current_chain_height,

				feerate_per_kw: msg.feerate_per_kw,
				channel_value_satoshis: msg.funding_satoshis,
				counterparty_dust_limit_satoshis: msg.dust_limit_satoshis,
				holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
				counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000),
				holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config),
				counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis),
				holder_selected_channel_reserve_satoshis,
				counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
				holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
				counterparty_max_accepted_htlcs: msg.max_accepted_htlcs,
				holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),

				counterparty_forwarding_info: None,

				channel_transaction_parameters: ChannelTransactionParameters {
					holder_pubkeys: pubkeys,
					holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
					is_outbound_from_holder: false,
					counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
						selected_contest_delay: msg.to_self_delay,
						pubkeys: counterparty_pubkeys,
					}),
					funding_outpoint: None,
					channel_type_features: channel_type.clone()
				},
				funding_transaction: None,
				is_batch_funding: None,

				counterparty_cur_commitment_point: Some(msg.first_per_commitment_point),
				counterparty_prev_commitment_point: None,
				counterparty_node_id,

				counterparty_shutdown_scriptpubkey,

				commitment_secrets: CounterpartyCommitmentSecrets::new(),

				channel_update_status: ChannelUpdateStatus::Enabled,
				closing_signed_in_flight: false,

				announcement_sigs: None,

				#[cfg(any(test, fuzzing))]
				next_local_commitment_tx_fee_info_cached: Mutex::new(None),
				#[cfg(any(test, fuzzing))]
				next_remote_commitment_tx_fee_info_cached: Mutex::new(None),

				workaround_lnd_bug_4006: None,
				sent_message_awaiting_response: None,

				latest_inbound_scid_alias: None,
				outbound_scid_alias: 0,

				channel_pending_event_emitted: false,
				channel_ready_event_emitted: false,

				#[cfg(any(test, fuzzing))]
				historical_inbound_htlc_fulfills: HashSet::new(),

				blocked_monitor_updates: Vec::new(),
			},
			unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
		})
	}
7217 /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
7218 /// should be sent back to the counterparty node.
7220 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7221 pub fn accept_inbound_channel(&mut self) -> msgs::AcceptChannel {
7222 if self.context.is_outbound() {
7223 panic!("Tried to send accept_channel for an outbound channel?");
7226 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
7227 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
7229 panic!("Tried to send accept_channel after channel had moved forward");
7231 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7232 panic!("Tried to send an accept_channel for a channel that has already advanced");
7235 self.generate_accept_channel_message()
7238 /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
7239 /// inbound channel. If the intention is to accept an inbound channel, use
7240 /// [`InboundV1Channel::accept_inbound_channel`] instead.
7242 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7243 fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
7244 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
7245 let keys = self.context.get_holder_pubkeys();
7247 msgs::AcceptChannel {
7248 temporary_channel_id: self.context.channel_id,
7249 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
7250 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
7251 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
7252 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
7253 minimum_depth: self.context.minimum_depth.unwrap(),
7254 to_self_delay: self.context.get_holder_selected_contest_delay(),
7255 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
7256 funding_pubkey: keys.funding_pubkey,
7257 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
7258 payment_point: keys.payment_point,
7259 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
7260 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
7261 first_per_commitment_point,
7262 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
7263 Some(script) => script.clone().into_inner(),
7264 None => Builder::new().into_script(),
7266 channel_type: Some(self.context.channel_type.clone()),
7268 next_local_nonce: None,
7272 /// Enables the possibility for tests to extract a [`msgs::AcceptChannel`] message for an
7273 /// inbound channel without accepting it.
7275 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7277 pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel {
7278 self.generate_accept_channel_message()
7281 fn check_funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<CommitmentTransaction, ChannelError> where L::Target: Logger {
7282 let funding_script = self.context.get_funding_redeemscript();
7284 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
7285 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
7286 let trusted_tx = initial_commitment_tx.trust();
7287 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
7288 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
7289 // They sign the holder commitment transaction...
7290 log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
7291 log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
7292 encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
7293 encode::serialize_hex(&funding_script), &self.context.channel_id());
7294 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
7296 Ok(initial_commitment_tx)
7299 pub fn funding_created<L: Deref>(
7300 mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
7301 ) -> Result<(Channel<SP>, Option<msgs::FundingSigned>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (Self, ChannelError)>
7305 if self.context.is_outbound() {
7306 return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned())));
7309 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
7310 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
7312 // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
7313 // remember the channel, so it's safe to just send an error_message here and drop the channel.
7315 return Err((self, ChannelError::Close("Received funding_created after we got the channel!".to_owned())));
7317 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
7318 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
7319 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7320 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
7323 let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
7324 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
7325 // This is an externally observable change before we finish all our checks. In particular,
7326 // check_funding_created_signature may fail.
7327 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
7329 let initial_commitment_tx = match self.check_funding_created_signature(&msg.signature, logger) {
7331 Err(ChannelError::Close(e)) => {
7332 self.context.channel_transaction_parameters.funding_outpoint = None;
7333 return Err((self, ChannelError::Close(e)));
7336 // The only error we know how to handle is ChannelError::Close, so we fall over here
7337 // to make sure we don't continue with an inconsistent state.
7338 panic!("unexpected error type from check_funding_created_signature {:?}", e);
7342 let holder_commitment_tx = HolderCommitmentTransaction::new(
7343 initial_commitment_tx,
7346 &self.context.get_holder_pubkeys().funding_pubkey,
7347 self.context.counterparty_funding_pubkey()
7350 if let Err(_) = self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new()) {
7351 return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
7354 // Now that we're past error-generating stuff, update our local state:
7356 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
7357 self.context.channel_id = funding_txo.to_channel_id();
7358 self.context.cur_counterparty_commitment_transaction_number -= 1;
7359 self.context.cur_holder_commitment_transaction_number -= 1;
7361 let (counterparty_initial_commitment_tx, funding_signed) = self.context.get_funding_signed_msg(logger);
7363 let funding_redeemscript = self.context.get_funding_redeemscript();
7364 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
7365 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
7366 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
7367 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
7368 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
7369 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
7370 shutdown_script, self.context.get_holder_selected_contest_delay(),
7371 &self.context.destination_script, (funding_txo, funding_txo_script.clone()),
7372 &self.context.channel_transaction_parameters,
7373 funding_redeemscript.clone(), self.context.channel_value_satoshis,
7375 holder_commitment_tx, best_block, self.context.counterparty_node_id);
7376 channel_monitor.provide_initial_counterparty_commitment_tx(
7377 counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
7378 self.context.cur_counterparty_commitment_transaction_number + 1,
7379 self.context.counterparty_cur_commitment_point.unwrap(), self.context.feerate_per_kw,
7380 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
7381 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
7383 log_info!(logger, "{} funding_signed for peer for channel {}",
7384 if funding_signed.is_some() { "Generated" } else { "Waiting for signature on" }, &self.context.channel_id());
7386 // Promote the channel to a full-fledged one now that we have updated the state and have a
7387 // `ChannelMonitor`.
7388 let mut channel = Channel {
7389 context: self.context,
7391 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
7392 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
7394 Ok((channel, funding_signed, channel_monitor))
7398 const SERIALIZATION_VERSION: u8 = 3;
7399 const MIN_SERIALIZATION_VERSION: u8 = 3;
7401 impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
7407 impl Writeable for ChannelUpdateStatus {
7408 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7409 // We only care about writing out the current state as it was announced, i.e. only either
7410 // Enabled or Disabled. In the case of DisabledStaged, we most recently announced the
7411 // channel as enabled, so we write 0. For EnabledStaged, we similarly write a 1.
7413 ChannelUpdateStatus::Enabled => 0u8.write(writer)?,
7414 ChannelUpdateStatus::DisabledStaged(_) => 0u8.write(writer)?,
7415 ChannelUpdateStatus::EnabledStaged(_) => 1u8.write(writer)?,
7416 ChannelUpdateStatus::Disabled => 1u8.write(writer)?,
7422 impl Readable for ChannelUpdateStatus {
7423 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
7424 Ok(match <u8 as Readable>::read(reader)? {
7425 0 => ChannelUpdateStatus::Enabled,
7426 1 => ChannelUpdateStatus::Disabled,
7427 _ => return Err(DecodeError::InvalidValue),
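// A minimal round-trip sketch (a test-only addition, not part of the upstream file):
// `Enabled` writes 0 and `Disabled` writes 1, and the staged variants above
// intentionally collapse onto those same bytes, so a staged state comes back as the
// corresponding plain state after a reload.
#[cfg(test)]
#[test]
fn channel_update_status_ser_round_trip() {
	let enabled = ChannelUpdateStatus::Enabled.encode();
	assert_eq!(enabled, vec![0u8]);
	assert!(matches!(<ChannelUpdateStatus as Readable>::read(&mut &enabled[..]).unwrap(),
		ChannelUpdateStatus::Enabled));
	let disabled = ChannelUpdateStatus::Disabled.encode();
	assert_eq!(disabled, vec![1u8]);
	assert!(matches!(<ChannelUpdateStatus as Readable>::read(&mut &disabled[..]).unwrap(),
		ChannelUpdateStatus::Disabled));
}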
7432 impl Writeable for AnnouncementSigsState {
7433 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7434 // We only care about writing out the current state as if we had just disconnected, at
7435 // which point we always set anything but PeerReceived to NotSent.
7437 AnnouncementSigsState::NotSent => 0u8.write(writer),
7438 AnnouncementSigsState::MessageSent => 0u8.write(writer),
7439 AnnouncementSigsState::Committed => 0u8.write(writer),
7440 AnnouncementSigsState::PeerReceived => 1u8.write(writer),
7445 impl Readable for AnnouncementSigsState {
7446 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
7447 Ok(match <u8 as Readable>::read(reader)? {
7448 0 => AnnouncementSigsState::NotSent,
7449 1 => AnnouncementSigsState::PeerReceived,
7450 _ => return Err(DecodeError::InvalidValue),
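// Companion sketch (test-only addition, not part of the upstream file): everything
// except `PeerReceived` is written as if it were `NotSent`, matching the disconnect
// semantics described above.
#[cfg(test)]
#[test]
fn announcement_sigs_state_ser_collapses_to_not_sent() {
	let bytes = AnnouncementSigsState::Committed.encode();
	assert_eq!(bytes, vec![0u8]);
	assert!(matches!(<AnnouncementSigsState as Readable>::read(&mut &bytes[..]).unwrap(),
		AnnouncementSigsState::NotSent));
}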
7455 impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
7456 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7457 // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been called.
7460 write_ver_prefix!(writer, MIN_SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
7462 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7463 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write
7464 // the low bytes now and the optional high bytes later.
7465 let user_id_low = self.context.user_id as u64;
7466 user_id_low.write(writer)?;
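// Hypothetical worked example (illustration only, values not from the original): for
// user_id = (7u128 << 64) | 42 we write user_id_low = 42 here, and later write
// user_id_high_opt = Some(7) as TLV type 25, so a 0.0.113+ reader reassembles
// 42 | (7 << 64) == user_id while an older reader still sees a sane u64.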
7468 // Version 1 deserializers expected to read parts of the config object here. Version 2
7469 // deserializers (0.0.99) now read config through TLVs, and as we now require them for
7470 // `minimum_depth` we simply write dummy values here.
7471 writer.write_all(&[0; 8])?;
7473 self.context.channel_id.write(writer)?;
7475 let mut channel_state = self.context.channel_state;
7476 if matches!(channel_state, ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)) {
7477 channel_state.set_peer_disconnected();
7479 channel_state.to_u32().write(writer)?;
7481 self.context.channel_value_satoshis.write(writer)?;
7483 self.context.latest_monitor_update_id.write(writer)?;
7485 // Write out the old serialization for shutdown_pubkey for backwards compatibility, if
7486 // deserialized from that format.
7487 match self.context.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) {
7488 Some(shutdown_pubkey) => shutdown_pubkey.write(writer)?,
7489 None => [0u8; PUBLIC_KEY_SIZE].write(writer)?,
7491 self.context.destination_script.write(writer)?;
7493 self.context.cur_holder_commitment_transaction_number.write(writer)?;
7494 self.context.cur_counterparty_commitment_transaction_number.write(writer)?;
7495 self.context.value_to_self_msat.write(writer)?;
7497 let mut dropped_inbound_htlcs = 0;
7498 for htlc in self.context.pending_inbound_htlcs.iter() {
7499 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
7500 dropped_inbound_htlcs += 1;
7503 (self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?;
7504 for htlc in self.context.pending_inbound_htlcs.iter() {
7505 if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state {
7508 htlc.htlc_id.write(writer)?;
7509 htlc.amount_msat.write(writer)?;
7510 htlc.cltv_expiry.write(writer)?;
7511 htlc.payment_hash.write(writer)?;
7513 &InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
7514 &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_state) => {
7516 htlc_state.write(writer)?;
7518 &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_state) => {
7520 htlc_state.write(writer)?;
7522 &InboundHTLCState::Committed => {
7525 &InboundHTLCState::LocalRemoved(ref removal_reason) => {
7527 removal_reason.write(writer)?;
7532 let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];
7533 let mut pending_outbound_skimmed_fees: Vec<Option<u64>> = Vec::new();
7534 let mut pending_outbound_blinding_points: Vec<Option<PublicKey>> = Vec::new();
7536 (self.context.pending_outbound_htlcs.len() as u64).write(writer)?;
7537 for htlc in self.context.pending_outbound_htlcs.iter() {
7538 htlc.htlc_id.write(writer)?;
7539 htlc.amount_msat.write(writer)?;
7540 htlc.cltv_expiry.write(writer)?;
7541 htlc.payment_hash.write(writer)?;
7542 htlc.source.write(writer)?;
7544 &OutboundHTLCState::LocalAnnounced(ref onion_packet) => {
7546 onion_packet.write(writer)?;
7548 &OutboundHTLCState::Committed => {
7551 &OutboundHTLCState::RemoteRemoved(_) => {
7552 // Treat this as a Committed because we haven't received the CS - they'll
7553 // resend the claim/fail on reconnect, as well as (hopefully) the missing CS.
7556 &OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref outcome) => {
7558 if let OutboundHTLCOutcome::Success(preimage) = outcome {
7559 preimages.push(preimage);
7561 let reason: Option<&HTLCFailReason> = outcome.into();
7562 reason.write(writer)?;
7564 &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) => {
7566 if let OutboundHTLCOutcome::Success(preimage) = outcome {
7567 preimages.push(preimage);
7569 let reason: Option<&HTLCFailReason> = outcome.into();
7570 reason.write(writer)?;
7573 pending_outbound_skimmed_fees.push(htlc.skimmed_fee_msat);
7574 pending_outbound_blinding_points.push(htlc.blinding_point);
7577 let mut holding_cell_skimmed_fees: Vec<Option<u64>> = Vec::new();
7578 let mut holding_cell_blinding_points: Vec<Option<PublicKey>> = Vec::new();
7579 // Vec of (htlc_id, failure_code, sha256_of_onion)
7580 let mut malformed_htlcs: Vec<(u64, u16, [u8; 32])> = Vec::new();
7581 (self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
7582 for update in self.context.holding_cell_htlc_updates.iter() {
7584 &HTLCUpdateAwaitingACK::AddHTLC {
7585 ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
7586 blinding_point, skimmed_fee_msat,
7589 amount_msat.write(writer)?;
7590 cltv_expiry.write(writer)?;
7591 payment_hash.write(writer)?;
7592 source.write(writer)?;
7593 onion_routing_packet.write(writer)?;
7595 holding_cell_skimmed_fees.push(skimmed_fee_msat);
7596 holding_cell_blinding_points.push(blinding_point);
7598 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
7600 payment_preimage.write(writer)?;
7601 htlc_id.write(writer)?;
7603 &HTLCUpdateAwaitingACK::FailHTLC { ref htlc_id, ref err_packet } => {
7605 htlc_id.write(writer)?;
7606 err_packet.write(writer)?;
7608 &HTLCUpdateAwaitingACK::FailMalformedHTLC {
7609 htlc_id, failure_code, sha256_of_onion
7611 // We don't want to break downgrading by adding a new variant, so write a dummy
7612 // `::FailHTLC` variant and write the real malformed error as an optional TLV.
7613 malformed_htlcs.push((htlc_id, failure_code, sha256_of_onion));
7615 let dummy_err_packet = msgs::OnionErrorPacket { data: Vec::new() };
7617 htlc_id.write(writer)?;
7618 dummy_err_packet.write(writer)?;
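// Worked illustration (an assumption about the intended wire layout, mirroring the
// read path below): a `FailMalformedHTLC { htlc_id: 3, .. }` thus hits the wire as
// `FailHTLC { htlc_id: 3, err_packet: <empty> }` plus a
// `(3, failure_code, sha256_of_onion)` entry in TLV type 43; readers that understand
// type 43 swap the dummy back into the malformed variant on deserialization.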
7623 match self.context.resend_order {
7624 RAACommitmentOrder::CommitmentFirst => 0u8.write(writer)?,
7625 RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?,
7628 self.context.monitor_pending_channel_ready.write(writer)?;
7629 self.context.monitor_pending_revoke_and_ack.write(writer)?;
7630 self.context.monitor_pending_commitment_signed.write(writer)?;
7632 (self.context.monitor_pending_forwards.len() as u64).write(writer)?;
7633 for &(ref pending_forward, ref htlc_id) in self.context.monitor_pending_forwards.iter() {
7634 pending_forward.write(writer)?;
7635 htlc_id.write(writer)?;
7638 (self.context.monitor_pending_failures.len() as u64).write(writer)?;
7639 for &(ref htlc_source, ref payment_hash, ref fail_reason) in self.context.monitor_pending_failures.iter() {
7640 htlc_source.write(writer)?;
7641 payment_hash.write(writer)?;
7642 fail_reason.write(writer)?;
7645 if self.context.is_outbound() {
7646 self.context.pending_update_fee.map(|(a, _)| a).write(writer)?;
7647 } else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.context.pending_update_fee {
7648 Some(feerate).write(writer)?;
7650 // As for inbound HTLCs, if the update was only announced and never committed in a
7651 // commitment_signed, drop it.
7652 None::<u32>.write(writer)?;
7654 self.context.holding_cell_update_fee.write(writer)?;
7656 self.context.next_holder_htlc_id.write(writer)?;
7657 (self.context.next_counterparty_htlc_id - dropped_inbound_htlcs).write(writer)?;
7658 self.context.update_time_counter.write(writer)?;
7659 self.context.feerate_per_kw.write(writer)?;
7661 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7662 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7663 // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7664 // consider the stale state on reload.
7667 self.context.funding_tx_confirmed_in.write(writer)?;
7668 self.context.funding_tx_confirmation_height.write(writer)?;
7669 self.context.short_channel_id.write(writer)?;
7671 self.context.counterparty_dust_limit_satoshis.write(writer)?;
7672 self.context.holder_dust_limit_satoshis.write(writer)?;
7673 self.context.counterparty_max_htlc_value_in_flight_msat.write(writer)?;
7675 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7676 self.context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0).write(writer)?;
7678 self.context.counterparty_htlc_minimum_msat.write(writer)?;
7679 self.context.holder_htlc_minimum_msat.write(writer)?;
7680 self.context.counterparty_max_accepted_htlcs.write(writer)?;
7682 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7683 self.context.minimum_depth.unwrap_or(0).write(writer)?;
7685 match &self.context.counterparty_forwarding_info {
7688 info.fee_base_msat.write(writer)?;
7689 info.fee_proportional_millionths.write(writer)?;
7690 info.cltv_expiry_delta.write(writer)?;
7692 None => 0u8.write(writer)?
7695 self.context.channel_transaction_parameters.write(writer)?;
7696 self.context.funding_transaction.write(writer)?;
7698 self.context.counterparty_cur_commitment_point.write(writer)?;
7699 self.context.counterparty_prev_commitment_point.write(writer)?;
7700 self.context.counterparty_node_id.write(writer)?;
7702 self.context.counterparty_shutdown_scriptpubkey.write(writer)?;
7704 self.context.commitment_secrets.write(writer)?;
7706 self.context.channel_update_status.write(writer)?;
7708 #[cfg(any(test, fuzzing))]
7709 (self.context.historical_inbound_htlc_fulfills.len() as u64).write(writer)?;
7710 #[cfg(any(test, fuzzing))]
7711 for htlc in self.context.historical_inbound_htlc_fulfills.iter() {
7712 htlc.write(writer)?;
7715 // If the channel type is something other than only-static-remote-key, then we need to have
7716 // older clients fail to deserialize this channel at all. If the type is
7717 // only-static-remote-key, we simply consider it "default" and don't write the channel type at all.
7719 let chan_type = if self.context.channel_type != ChannelTypeFeatures::only_static_remote_key() {
7720 Some(&self.context.channel_type) } else { None };
7722 // The same logic applies for `holder_selected_channel_reserve_satoshis` values other than
7723 // the default, and for `holder_max_htlc_value_in_flight_msat` values configured to a
7724 // percentage of the channel value other than the 10% that older versions of LDK
7725 // hard-coded before the percentage was made configurable.
7726 let serialized_holder_selected_reserve =
7727 if self.context.holder_selected_channel_reserve_satoshis != get_legacy_default_holder_selected_channel_reserve_satoshis(self.context.channel_value_satoshis)
7728 { Some(self.context.holder_selected_channel_reserve_satoshis) } else { None };
7730 let mut old_max_in_flight_percent_config = UserConfig::default().channel_handshake_config;
7731 old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = MAX_IN_FLIGHT_PERCENT_LEGACY;
7732 let serialized_holder_htlc_max_in_flight =
7733 if self.context.holder_max_htlc_value_in_flight_msat != get_holder_max_htlc_value_in_flight_msat(self.context.channel_value_satoshis, &old_max_in_flight_percent_config)
7734 { Some(self.context.holder_max_htlc_value_in_flight_msat) } else { None };
7736 let channel_pending_event_emitted = Some(self.context.channel_pending_event_emitted);
7737 let channel_ready_event_emitted = Some(self.context.channel_ready_event_emitted);
7739 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7740 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. Therefore,
7741 // we write the high bytes as an option here.
7742 let user_id_high_opt = Some((self.context.user_id >> 64) as u64);
7744 let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.context.holder_max_accepted_htlcs) };
7746 write_tlv_fields!(writer, {
7747 (0, self.context.announcement_sigs, option),
7748 // minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
7749 // default value instead of being Option<>al. Thus, to maintain compatibility we write
7750 // them twice, once with their original default values above, and once as an option
7751 // here. On the read side, old versions will simply ignore the odd-type entries here,
7752 // and new versions map the default values to None and allow the TLV entries here to override them.
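// (For example, odd type 1 below is safely skipped by readers that predate it,
// while an even type such as 2 makes unaware readers fail with an
// unknown-required-feature style error, per the usual TLV odd/even rule.)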
7754 (1, self.context.minimum_depth, option),
7755 (2, chan_type, option),
7756 (3, self.context.counterparty_selected_channel_reserve_satoshis, option),
7757 (4, serialized_holder_selected_reserve, option),
7758 (5, self.context.config, required),
7759 (6, serialized_holder_htlc_max_in_flight, option),
7760 (7, self.context.shutdown_scriptpubkey, option),
7761 (8, self.context.blocked_monitor_updates, optional_vec),
7762 (9, self.context.target_closing_feerate_sats_per_kw, option),
7763 (11, self.context.monitor_pending_finalized_fulfills, required_vec),
7764 (13, self.context.channel_creation_height, required),
7765 (15, preimages, required_vec),
7766 (17, self.context.announcement_sigs_state, required),
7767 (19, self.context.latest_inbound_scid_alias, option),
7768 (21, self.context.outbound_scid_alias, required),
7769 (23, channel_ready_event_emitted, option),
7770 (25, user_id_high_opt, option),
7771 (27, self.context.channel_keys_id, required),
7772 (28, holder_max_accepted_htlcs, option),
7773 (29, self.context.temporary_channel_id, option),
7774 (31, channel_pending_event_emitted, option),
7775 (35, pending_outbound_skimmed_fees, optional_vec),
7776 (37, holding_cell_skimmed_fees, optional_vec),
7777 (38, self.context.is_batch_funding, option),
7778 (39, pending_outbound_blinding_points, optional_vec),
7779 (41, holding_cell_blinding_points, optional_vec),
7780 (43, malformed_htlcs, optional_vec), // Added in 0.0.119
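// (An aside on the scheme, per the BOLT 1 TLV rules used here: types must be
// strictly increasing within the stream, odd types are optional and skippable by
// old readers, and even types force old readers to reject the record.)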
7787 const MAX_ALLOC_SIZE: usize = 64*1024;
7788 impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel<SP>
7790 ES::Target: EntropySource,
7791 SP::Target: SignerProvider
7793 fn read<R : io::Read>(reader: &mut R, args: (&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)) -> Result<Self, DecodeError> {
7794 let (entropy_source, signer_provider, serialized_height, our_supported_features) = args;
7795 let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
7797 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7798 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We read
7799 // the low bytes now and the high bytes later.
7800 let user_id_low: u64 = Readable::read(reader)?;
7802 let mut config = Some(LegacyChannelConfig::default());
7804 // Read the old serialization of the ChannelConfig from version 0.0.98.
7805 config.as_mut().unwrap().options.forwarding_fee_proportional_millionths = Readable::read(reader)?;
7806 config.as_mut().unwrap().options.cltv_expiry_delta = Readable::read(reader)?;
7807 config.as_mut().unwrap().announced_channel = Readable::read(reader)?;
7808 config.as_mut().unwrap().commit_upfront_shutdown_pubkey = Readable::read(reader)?;
7810 // Read the 8 bytes of backwards-compatibility ChannelConfig data.
7811 let mut _val: u64 = Readable::read(reader)?;
7814 let channel_id = Readable::read(reader)?;
7815 let channel_state = ChannelState::from_u32(Readable::read(reader)?).map_err(|_| DecodeError::InvalidValue)?;
7816 let channel_value_satoshis = Readable::read(reader)?;
7818 let latest_monitor_update_id = Readable::read(reader)?;
7820 let mut keys_data = None;
7822 // Read the serialized signer bytes. We'll choose to deserialize them or not based on whether
7823 // the `channel_keys_id` TLV is present below.
7824 let keys_len: u32 = Readable::read(reader)?;
7825 keys_data = Some(Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE)));
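// The declared length is peer-/disk-controlled, so the up-front allocation is
// clamped to MAX_ALLOC_SIZE and the buffer is instead grown incrementally below.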
7826 while keys_data.as_ref().unwrap().len() != keys_len as usize {
7827 // Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
7828 let mut data = [0; 1024];
7829 let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.as_ref().unwrap().len())];
7830 reader.read_exact(read_slice)?;
7831 keys_data.as_mut().unwrap().extend_from_slice(read_slice);
7835 // Read the old serialization for shutdown_pubkey, preferring the TLV field later if set.
7836 let mut shutdown_scriptpubkey = match <PublicKey as Readable>::read(reader) {
7837 Ok(pubkey) => Some(ShutdownScript::new_p2wpkh_from_pubkey(pubkey)),
7840 let destination_script = Readable::read(reader)?;
7842 let cur_holder_commitment_transaction_number = Readable::read(reader)?;
7843 let cur_counterparty_commitment_transaction_number = Readable::read(reader)?;
7844 let value_to_self_msat = Readable::read(reader)?;
7846 let pending_inbound_htlc_count: u64 = Readable::read(reader)?;
7848 let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7849 for _ in 0..pending_inbound_htlc_count {
7850 pending_inbound_htlcs.push(InboundHTLCOutput {
7851 htlc_id: Readable::read(reader)?,
7852 amount_msat: Readable::read(reader)?,
7853 cltv_expiry: Readable::read(reader)?,
7854 payment_hash: Readable::read(reader)?,
7855 state: match <u8 as Readable>::read(reader)? {
7856 1 => InboundHTLCState::AwaitingRemoteRevokeToAnnounce(Readable::read(reader)?),
7857 2 => InboundHTLCState::AwaitingAnnouncedRemoteRevoke(Readable::read(reader)?),
7858 3 => InboundHTLCState::Committed,
7859 4 => InboundHTLCState::LocalRemoved(Readable::read(reader)?),
7860 _ => return Err(DecodeError::InvalidValue),
7865 let pending_outbound_htlc_count: u64 = Readable::read(reader)?;
7866 let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7867 for _ in 0..pending_outbound_htlc_count {
7868 pending_outbound_htlcs.push(OutboundHTLCOutput {
7869 htlc_id: Readable::read(reader)?,
7870 amount_msat: Readable::read(reader)?,
7871 cltv_expiry: Readable::read(reader)?,
7872 payment_hash: Readable::read(reader)?,
7873 source: Readable::read(reader)?,
7874 state: match <u8 as Readable>::read(reader)? {
7875 0 => OutboundHTLCState::LocalAnnounced(Box::new(Readable::read(reader)?)),
7876 1 => OutboundHTLCState::Committed,
7878 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7879 OutboundHTLCState::RemoteRemoved(option.into())
7882 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7883 OutboundHTLCState::AwaitingRemoteRevokeToRemove(option.into())
7886 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7887 OutboundHTLCState::AwaitingRemovedRemoteRevoke(option.into())
7889 _ => return Err(DecodeError::InvalidValue),
7891 skimmed_fee_msat: None,
7892 blinding_point: None,
7896 let holding_cell_htlc_update_count: u64 = Readable::read(reader)?;
7897 let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, DEFAULT_MAX_HTLCS as usize*2));
7898 for _ in 0..holding_cell_htlc_update_count {
7899 holding_cell_htlc_updates.push(match <u8 as Readable>::read(reader)? {
7900 0 => HTLCUpdateAwaitingACK::AddHTLC {
7901 amount_msat: Readable::read(reader)?,
7902 cltv_expiry: Readable::read(reader)?,
7903 payment_hash: Readable::read(reader)?,
7904 source: Readable::read(reader)?,
7905 onion_routing_packet: Readable::read(reader)?,
7906 skimmed_fee_msat: None,
7907 blinding_point: None,
7909 1 => HTLCUpdateAwaitingACK::ClaimHTLC {
7910 payment_preimage: Readable::read(reader)?,
7911 htlc_id: Readable::read(reader)?,
7913 2 => HTLCUpdateAwaitingACK::FailHTLC {
7914 htlc_id: Readable::read(reader)?,
7915 err_packet: Readable::read(reader)?,
7917 _ => return Err(DecodeError::InvalidValue),
7921 let resend_order = match <u8 as Readable>::read(reader)? {
7922 0 => RAACommitmentOrder::CommitmentFirst,
7923 1 => RAACommitmentOrder::RevokeAndACKFirst,
7924 _ => return Err(DecodeError::InvalidValue),
7927 let monitor_pending_channel_ready = Readable::read(reader)?;
7928 let monitor_pending_revoke_and_ack = Readable::read(reader)?;
7929 let monitor_pending_commitment_signed = Readable::read(reader)?;
7931 let monitor_pending_forwards_count: u64 = Readable::read(reader)?;
7932 let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, DEFAULT_MAX_HTLCS as usize));
7933 for _ in 0..monitor_pending_forwards_count {
7934 monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?));
7937 let monitor_pending_failures_count: u64 = Readable::read(reader)?;
7938 let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, DEFAULT_MAX_HTLCS as usize));
7939 for _ in 0..monitor_pending_failures_count {
7940 monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?));
7943 let pending_update_fee_value: Option<u32> = Readable::read(reader)?;
7945 let holding_cell_update_fee = Readable::read(reader)?;
7947 let next_holder_htlc_id = Readable::read(reader)?;
7948 let next_counterparty_htlc_id = Readable::read(reader)?;
7949 let update_time_counter = Readable::read(reader)?;
7950 let feerate_per_kw = Readable::read(reader)?;
7952 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7953 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7954 // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7955 // consider the stale state on reload.
7956 match <u8 as Readable>::read(reader)? {
7959 let _: u32 = Readable::read(reader)?;
7960 let _: u64 = Readable::read(reader)?;
7961 let _: Signature = Readable::read(reader)?;
7963 _ => return Err(DecodeError::InvalidValue),
7966 let funding_tx_confirmed_in = Readable::read(reader)?;
7967 let funding_tx_confirmation_height = Readable::read(reader)?;
7968 let short_channel_id = Readable::read(reader)?;
7970 let counterparty_dust_limit_satoshis = Readable::read(reader)?;
7971 let holder_dust_limit_satoshis = Readable::read(reader)?;
7972 let counterparty_max_htlc_value_in_flight_msat = Readable::read(reader)?;
7973 let mut counterparty_selected_channel_reserve_satoshis = None;
7975 // Read the old serialization from version 0.0.98.
7976 counterparty_selected_channel_reserve_satoshis = Some(Readable::read(reader)?);
7978 // Read the 8 bytes of backwards-compatibility data.
7979 let _dummy: u64 = Readable::read(reader)?;
7981 let counterparty_htlc_minimum_msat = Readable::read(reader)?;
7982 let holder_htlc_minimum_msat = Readable::read(reader)?;
7983 let counterparty_max_accepted_htlcs = Readable::read(reader)?;
7985 let mut minimum_depth = None;
7987 // Read the old serialization from version 0.0.98.
7988 minimum_depth = Some(Readable::read(reader)?);
7990 // Read the 4 bytes of backwards-compatibility data.
7991 let _dummy: u32 = Readable::read(reader)?;
7994 let counterparty_forwarding_info = match <u8 as Readable>::read(reader)? {
7996 1 => Some(CounterpartyForwardingInfo {
7997 fee_base_msat: Readable::read(reader)?,
7998 fee_proportional_millionths: Readable::read(reader)?,
7999 cltv_expiry_delta: Readable::read(reader)?,
8001 _ => return Err(DecodeError::InvalidValue),
8004 let mut channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
8005 let funding_transaction: Option<Transaction> = Readable::read(reader)?;
8007 let counterparty_cur_commitment_point = Readable::read(reader)?;
8009 let counterparty_prev_commitment_point = Readable::read(reader)?;
8010 let counterparty_node_id = Readable::read(reader)?;
8012 let counterparty_shutdown_scriptpubkey = Readable::read(reader)?;
8013 let commitment_secrets = Readable::read(reader)?;
8015 let channel_update_status = Readable::read(reader)?;
8017 #[cfg(any(test, fuzzing))]
8018 let mut historical_inbound_htlc_fulfills = HashSet::new();
8019 #[cfg(any(test, fuzzing))]
8021 let htlc_fulfills_len: u64 = Readable::read(reader)?;
8022 for _ in 0..htlc_fulfills_len {
8023 assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?));
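// If we wrote a pending fee update, reconstruct its state from the channel
// direction: only the funder sends update_fee, so on an outbound channel the
// update was ours (`Outbound`), while on an inbound channel it was announced by
// the counterparty and still awaits our revoke_and_ack.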
8027 let pending_update_fee = if let Some(feerate) = pending_update_fee_value {
8028 Some((feerate, if channel_parameters.is_outbound_from_holder {
8029 FeeUpdateState::Outbound
8031 FeeUpdateState::AwaitingRemoteRevokeToAnnounce
8037 let mut announcement_sigs = None;
8038 let mut target_closing_feerate_sats_per_kw = None;
8039 let mut monitor_pending_finalized_fulfills = Some(Vec::new());
8040 let mut holder_selected_channel_reserve_satoshis = Some(get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis));
8041 let mut holder_max_htlc_value_in_flight_msat = Some(get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().channel_handshake_config));
8042 // Prior to supporting channel type negotiation, all of our channels were static_remotekey
8043 // only, so we default to that if none was written.
8044 let mut channel_type = Some(ChannelTypeFeatures::only_static_remote_key());
8045 let mut channel_creation_height = Some(serialized_height);
8046 let mut preimages_opt: Option<Vec<Option<PaymentPreimage>>> = None;
8048 // If we read an old Channel, for simplicity we just treat it as "we never sent an
8049 // AnnouncementSignatures" which implies we'll re-send it on reconnect, but that's fine.
8050 let mut announcement_sigs_state = Some(AnnouncementSigsState::NotSent);
8051 let mut latest_inbound_scid_alias = None;
8052 let mut outbound_scid_alias = None;
8053 let mut channel_pending_event_emitted = None;
8054 let mut channel_ready_event_emitted = None;
8056 let mut user_id_high_opt: Option<u64> = None;
8057 let mut channel_keys_id: Option<[u8; 32]> = None;
8058 let mut temporary_channel_id: Option<ChannelId> = None;
8059 let mut holder_max_accepted_htlcs: Option<u16> = None;
8061 let mut blocked_monitor_updates = Some(Vec::new());
8063 let mut pending_outbound_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
8064 let mut holding_cell_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
8066 let mut is_batch_funding: Option<()> = None;
8068 let mut pending_outbound_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
8069 let mut holding_cell_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
8071 let mut malformed_htlcs: Option<Vec<(u64, u16, [u8; 32])>> = None;
8073 read_tlv_fields!(reader, {
8074 (0, announcement_sigs, option),
8075 (1, minimum_depth, option),
8076 (2, channel_type, option),
8077 (3, counterparty_selected_channel_reserve_satoshis, option),
8078 (4, holder_selected_channel_reserve_satoshis, option),
8079 (5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
8080 (6, holder_max_htlc_value_in_flight_msat, option),
8081 (7, shutdown_scriptpubkey, option),
8082 (8, blocked_monitor_updates, optional_vec),
8083 (9, target_closing_feerate_sats_per_kw, option),
8084 (11, monitor_pending_finalized_fulfills, optional_vec),
8085 (13, channel_creation_height, option),
8086 (15, preimages_opt, optional_vec),
8087 (17, announcement_sigs_state, option),
8088 (19, latest_inbound_scid_alias, option),
8089 (21, outbound_scid_alias, option),
8090 (23, channel_ready_event_emitted, option),
8091 (25, user_id_high_opt, option),
8092 (27, channel_keys_id, option),
8093 (28, holder_max_accepted_htlcs, option),
8094 (29, temporary_channel_id, option),
8095 (31, channel_pending_event_emitted, option),
8096 (35, pending_outbound_skimmed_fees_opt, optional_vec),
8097 (37, holding_cell_skimmed_fees_opt, optional_vec),
8098 (38, is_batch_funding, option),
8099 (39, pending_outbound_blinding_points_opt, optional_vec),
8100 (41, holding_cell_blinding_points_opt, optional_vec),
8101 (43, malformed_htlcs, optional_vec), // Added in 0.0.119
8104 let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
8105 let mut holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
8106 // If we've gotten to the funding stage of the channel, populate the signer with its
8107 // required channel parameters.
8108 if channel_state >= ChannelState::FundingNegotiated {
8109 holder_signer.provide_channel_parameters(&channel_parameters);
8111 (channel_keys_id, holder_signer)
8113 // `keys_data` can be `None` if we had corrupted data.
8114 let keys_data = keys_data.ok_or(DecodeError::InvalidValue)?;
8115 let holder_signer = signer_provider.read_chan_signer(&keys_data)?;
8116 (holder_signer.channel_keys_id(), holder_signer)
8119 if let Some(preimages) = preimages_opt {
8120 let mut iter = preimages.into_iter();
8121 for htlc in pending_outbound_htlcs.iter_mut() {
8123 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(None)) => {
8124 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
8126 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(None)) => {
8127 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
8132 // We expect all preimages to be consumed above
8133 if iter.next().is_some() {
8134 return Err(DecodeError::InvalidValue);
8138 let chan_features = channel_type.as_ref().unwrap();
8139 if !chan_features.is_subset(our_supported_features) {
8140 // If the channel was written by a new version and negotiated with features we don't
8141 // understand yet, refuse to read it.
8142 return Err(DecodeError::UnknownRequiredFeature);
8145 // ChannelTransactionParameters may have had an empty features set upon deserialization.
8146 // To account for that, we're proactively setting/overriding the field here.
8147 channel_parameters.channel_type_features = chan_features.clone();
8149 let mut secp_ctx = Secp256k1::new();
8150 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
8152 // `user_id` used to be a single u64 value. In order to remain backwards
8153 // compatible with versions prior to 0.0.113, the u128 is serialized as two
8154 // separate u64 values.
8155 let user_id = user_id_low as u128 + ((user_id_high_opt.unwrap_or(0) as u128) << 64);
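// (This mirrors the write side's split; e.g. user_id_low = 42 and
// user_id_high_opt = Some(7) reassemble to (7 << 64) | 42.)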
8157 let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);
8159 if let Some(skimmed_fees) = pending_outbound_skimmed_fees_opt {
8160 let mut iter = skimmed_fees.into_iter();
8161 for htlc in pending_outbound_htlcs.iter_mut() {
8162 htlc.skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
8164 // We expect all skimmed fees to be consumed above
8165 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8167 if let Some(skimmed_fees) = holding_cell_skimmed_fees_opt {
8168 let mut iter = skimmed_fees.into_iter();
8169 for htlc in holding_cell_htlc_updates.iter_mut() {
8170 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut skimmed_fee_msat, .. } = htlc {
8171 *skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
8174 // We expect all skimmed fees to be consumed above
8175 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8177 if let Some(blinding_pts) = pending_outbound_blinding_points_opt {
8178 let mut iter = blinding_pts.into_iter();
8179 for htlc in pending_outbound_htlcs.iter_mut() {
8180 htlc.blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
8182 // We expect all blinding points to be consumed above
8183 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8185 if let Some(blinding_pts) = holding_cell_blinding_points_opt {
8186 let mut iter = blinding_pts.into_iter();
8187 for htlc in holding_cell_htlc_updates.iter_mut() {
8188 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut blinding_point, .. } = htlc {
8189 *blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
8192 // We expect all blinding points to be consumed above
8193 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8196 if let Some(malformed_htlcs) = malformed_htlcs {
8197 for (malformed_htlc_id, failure_code, sha256_of_onion) in malformed_htlcs {
8198 let htlc_idx = holding_cell_htlc_updates.iter().position(|htlc| {
8199 if let HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet } = htlc {
8200 let matches = *htlc_id == malformed_htlc_id;
8201 if matches { debug_assert!(err_packet.data.is_empty()) }
8204 }).ok_or(DecodeError::InvalidValue)?;
8205 let malformed_htlc = HTLCUpdateAwaitingACK::FailMalformedHTLC {
8206 htlc_id: malformed_htlc_id, failure_code, sha256_of_onion
8208 let _ = core::mem::replace(&mut holding_cell_htlc_updates[htlc_idx], malformed_htlc);
8213 context: ChannelContext {
8216 config: config.unwrap(),
8220 // Note that we don't care about serializing handshake limits as we only ever serialize
8221 // channel data after the handshake has completed.
8222 inbound_handshake_limits_override: None,
8225 temporary_channel_id,
8227 announcement_sigs_state: announcement_sigs_state.unwrap(),
8229 channel_value_satoshis,
8231 latest_monitor_update_id,
8233 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
8234 shutdown_scriptpubkey,
8237 cur_holder_commitment_transaction_number,
8238 cur_counterparty_commitment_transaction_number,
8241 holder_max_accepted_htlcs,
8242 pending_inbound_htlcs,
8243 pending_outbound_htlcs,
8244 holding_cell_htlc_updates,
8248 monitor_pending_channel_ready,
8249 monitor_pending_revoke_and_ack,
8250 monitor_pending_commitment_signed,
8251 monitor_pending_forwards,
8252 monitor_pending_failures,
8253 monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
8255 signer_pending_commitment_update: false,
8256 signer_pending_funding: false,
8259 holding_cell_update_fee,
8260 next_holder_htlc_id,
8261 next_counterparty_htlc_id,
8262 update_time_counter,
8265 #[cfg(debug_assertions)]
8266 holder_max_commitment_tx_output: Mutex::new((0, 0)),
8267 #[cfg(debug_assertions)]
8268 counterparty_max_commitment_tx_output: Mutex::new((0, 0)),
8270 last_sent_closing_fee: None,
8271 pending_counterparty_closing_signed: None,
8272 expecting_peer_commitment_signed: false,
8273 closing_fee_limits: None,
8274 target_closing_feerate_sats_per_kw,
8276 funding_tx_confirmed_in,
8277 funding_tx_confirmation_height,
8279 channel_creation_height: channel_creation_height.unwrap(),
8281 counterparty_dust_limit_satoshis,
8282 holder_dust_limit_satoshis,
8283 counterparty_max_htlc_value_in_flight_msat,
8284 holder_max_htlc_value_in_flight_msat: holder_max_htlc_value_in_flight_msat.unwrap(),
8285 counterparty_selected_channel_reserve_satoshis,
8286 holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis.unwrap(),
8287 counterparty_htlc_minimum_msat,
8288 holder_htlc_minimum_msat,
8289 counterparty_max_accepted_htlcs,
8292 counterparty_forwarding_info,
8294 channel_transaction_parameters: channel_parameters,
8295 funding_transaction,
8298 counterparty_cur_commitment_point,
8299 counterparty_prev_commitment_point,
8300 counterparty_node_id,
8302 counterparty_shutdown_scriptpubkey,
8306 channel_update_status,
8307 closing_signed_in_flight: false,
8311 #[cfg(any(test, fuzzing))]
8312 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
8313 #[cfg(any(test, fuzzing))]
8314 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
8316 workaround_lnd_bug_4006: None,
8317 sent_message_awaiting_response: None,
8319 latest_inbound_scid_alias,
8320 // Later, in the ChannelManager deserialization phase, we scan for channels and assign SCID aliases if they're missing.
8321 outbound_scid_alias: outbound_scid_alias.unwrap_or(0),
8323 channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true),
8324 channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true),
8326 #[cfg(any(test, fuzzing))]
8327 historical_inbound_htlc_fulfills,
8329 channel_type: channel_type.unwrap(),
8332 blocked_monitor_updates: blocked_monitor_updates.unwrap(),
8341 use bitcoin::blockdata::constants::ChainHash;
8342 use bitcoin::blockdata::script::{ScriptBuf, Builder};
8343 use bitcoin::blockdata::transaction::{Transaction, TxOut};
8344 use bitcoin::blockdata::opcodes;
8345 use bitcoin::network::constants::Network;
8346 use crate::ln::onion_utils::INVALID_ONION_BLINDING;
8347 use crate::ln::{PaymentHash, PaymentPreimage};
8348 use crate::ln::channel_keys::{RevocationKey, RevocationBasepoint};
8349 use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
8350 use crate::ln::channel::InitFeatures;
8351 use crate::ln::channel::{AwaitingChannelReadyFlags, Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, HTLCUpdateAwaitingACK, commit_tx_fee_msat};
8352 use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
8353 use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
8354 use crate::ln::msgs;
8355 use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
8356 use crate::ln::script::ShutdownScript;
8357 use crate::ln::chan_utils::{self, htlc_success_tx_weight, htlc_timeout_tx_weight};
8358 use crate::chain::BestBlock;
8359 use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
8360 use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
8361 use crate::chain::transaction::OutPoint;
8362 use crate::routing::router::{Path, RouteHop};
8363 use crate::util::config::UserConfig;
8364 use crate::util::errors::APIError;
8365 use crate::util::ser::{ReadableArgs, Writeable};
8366 use crate::util::test_utils;
8367 use crate::util::test_utils::{OnGetShutdownScriptpubkey, TestKeysInterface};
8368 use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
8369 use bitcoin::secp256k1::ffi::Signature as FFISignature;
8370 use bitcoin::secp256k1::{SecretKey,PublicKey};
8371 use bitcoin::hashes::sha256::Hash as Sha256;
8372 use bitcoin::hashes::Hash;
8373 use bitcoin::hashes::hex::FromHex;
8374 use bitcoin::hash_types::WPubkeyHash;
8375 use bitcoin::blockdata::locktime::absolute::LockTime;
8376 use bitcoin::address::{WitnessProgram, WitnessVersion};
8377 use crate::prelude::*;
8380 fn test_channel_state_order() {
8381 use crate::ln::channel::NegotiatingFundingFlags;
8382 use crate::ln::channel::AwaitingChannelReadyFlags;
8383 use crate::ln::channel::ChannelReadyFlags;
8385 assert!(ChannelState::NegotiatingFunding(NegotiatingFundingFlags::new()) < ChannelState::FundingNegotiated);
8386 assert!(ChannelState::FundingNegotiated < ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()));
8387 assert!(ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()) < ChannelState::ChannelReady(ChannelReadyFlags::new()));
8388 assert!(ChannelState::ChannelReady(ChannelReadyFlags::new()) < ChannelState::ShutdownComplete);
8391 struct TestFeeEstimator {
8394 impl FeeEstimator for TestFeeEstimator {
8395 fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 {
8401 fn test_max_funding_satoshis_no_wumbo() {
8402 assert_eq!(TOTAL_BITCOIN_SUPPLY_SATOSHIS, 21_000_000 * 100_000_000);
8403 assert!(MAX_FUNDING_SATOSHIS_NO_WUMBO <= TOTAL_BITCOIN_SUPPLY_SATOSHIS,
8404 "MAX_FUNDING_SATOSHIS_NO_WUMBO is greater than all satoshis in existence");
8408 signer: InMemorySigner,
8411 impl EntropySource for Keys {
8412 fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }
8415 impl SignerProvider for Keys {
8416 type EcdsaSigner = InMemorySigner;
8418 type TaprootSigner = InMemorySigner;
8420 fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
8421 self.signer.channel_keys_id()
8424 fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::EcdsaSigner {
8428 fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::EcdsaSigner, DecodeError> { panic!(); }
8430 fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()> {
8431 let secp_ctx = Secp256k1::signing_only();
8432 let channel_monitor_claim_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
8433 let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
8434 Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(channel_monitor_claim_key_hash).into_script())
8437 fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
8438 let secp_ctx = Secp256k1::signing_only();
8439 let channel_close_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
8440 Ok(ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key)))
8444 #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
8445 fn public_from_secret_hex(secp_ctx: &Secp256k1<bitcoin::secp256k1::All>, hex: &str) -> PublicKey {
8446 PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&<Vec<u8>>::from_hex(hex).unwrap()[..]).unwrap())
8450 fn upfront_shutdown_script_incompatibility() {
8451 let features = channelmanager::provided_init_features(&UserConfig::default()).clear_shutdown_anysegwit();
8452 let non_v0_segwit_shutdown_script = ShutdownScript::new_witness_program(
8453 &WitnessProgram::new(WitnessVersion::V16, &[0, 40]).unwrap(),
8456 let seed = [42; 32];
8457 let network = Network::Testnet;
8458 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8459 keys_provider.expect(OnGetShutdownScriptpubkey {
8460 returns: non_v0_segwit_shutdown_script.clone(),
8463 let secp_ctx = Secp256k1::new();
8464 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8465 let config = UserConfig::default();
8466 match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42, None) {
8467 Err(APIError::IncompatibleShutdownScript { script }) => {
8468 assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
8470 Err(e) => panic!("Unexpected error: {:?}", e),
8471 Ok(_) => panic!("Expected error"),
8475 // Check that, during channel creation, we use the same feerate in the open channel message
8476 // as we do in the Channel object creation itself.
8478 fn test_open_channel_msg_fee() {
8479 let original_fee = 253;
8480 let mut fee_est = TestFeeEstimator{fee_est: original_fee };
8481 let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
8482 let secp_ctx = Secp256k1::new();
8483 let seed = [42; 32];
8484 let network = Network::Testnet;
8485 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8487 let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8488 let config = UserConfig::default();
8489 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8491 // Now change the fee so we can check that the fee in the open_channel message is the
8492 // same as the old fee.
8493 fee_est.fee_est = 500;
8494 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8495 assert_eq!(open_channel_msg.feerate_per_kw, original_fee);
8499 fn test_holder_vs_counterparty_dust_limit() {
8500 // Test that when calculating the local and remote commitment transaction fees, the correct
8501 // dust limits are used.
8502 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8503 let secp_ctx = Secp256k1::new();
8504 let seed = [42; 32];
8505 let network = Network::Testnet;
8506 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8507 let logger = test_utils::TestLogger::new();
8508 let best_block = BestBlock::from_network(network);
8510 // Go through the flow of opening a channel between two nodes, making sure
8511 // they have different dust limits.
8513 // Create Node A's channel pointing to Node B's pubkey
8514 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8515 let config = UserConfig::default();
8516 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8518 // Create Node B's channel by receiving Node A's open_channel message
8519 // Make sure A's dust limit is as we expect.
8520 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8521 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8522 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8524 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
8525 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
8526 accept_channel_msg.dust_limit_satoshis = 546;
8527 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8528 node_a_chan.context.holder_dust_limit_satoshis = 1560;
8530 // Node A --> Node B: funding created
8531 let output_script = node_a_chan.context.get_funding_redeemscript();
8532 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8533 value: 10000000, script_pubkey: output_script.clone(),
8535 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8536 let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8537 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8539 // Node B --> Node A: funding signed
8540 let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
8541 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
8543 // Put some inbound and outbound HTLCs in A's channel.
8544 let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
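// As a back-of-the-envelope check, with feerate_per_kw = 15000 and the BOLT 3
// non-anchor weights (663 for HTLC-timeout, 703 for HTLC-success):
//   A (dust limit 1560 sat): 1560 + 663 * 15000 / 1000 = 11505 sat (offered),
//                            1560 + 703 * 15000 / 1000 = 12105 sat (received)
//   B (dust limit  546 sat):  546 + 663 * 15000 / 1000 = 10491 sat (offered),
//                             546 + 703 * 15000 / 1000 = 11091 sat (received)
// so an 11092 sat HTLC is dust on A's commitment but (just barely) non-dust on B's.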
8545 node_a_chan.context.pending_inbound_htlcs.push(InboundHTLCOutput {
8547 amount_msat: htlc_amount_msat,
8548 payment_hash: PaymentHash(Sha256::hash(&[42; 32]).to_byte_array()),
8549 cltv_expiry: 300000000,
8550 state: InboundHTLCState::Committed,
8553 node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
8555 amount_msat: htlc_amount_msat, // an amount below A's dust limit but above B's.
8556 payment_hash: PaymentHash(Sha256::hash(&[43; 32]).to_byte_array()),
8557 cltv_expiry: 200000000,
8558 state: OutboundHTLCState::Committed,
8559 source: HTLCSource::OutboundRoute {
8560 path: Path { hops: Vec::new(), blinded_tail: None },
8561 session_priv: SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
8562 first_hop_htlc_msat: 548,
8563 payment_id: PaymentId([42; 32]),
8565 skimmed_fee_msat: None,
8566 blinding_point: None,
8569 // Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass
8570 // the dust limit check.
8571 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
8572 let local_commit_tx_fee = node_a_chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8573 let local_commit_fee_0_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.get_channel_type());
8574 assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs);
8576 // Finally, make sure that when Node A calculates the remote's commitment transaction fees, all
8577 // of the HTLCs are seen to be above the dust limit.
8578 node_a_chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
8579 let remote_commit_fee_3_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.get_channel_type());
8580 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
8581 let remote_commit_tx_fee = node_a_chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8582 assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs);
8586 fn test_timeout_vs_success_htlc_dust_limit() {
8587 // Make sure that when `next_remote_commit_tx_fee_msat` and `next_local_commit_tx_fee_msat`
8588 // calculate the real dust limits for HTLCs (i.e. the dust limit given by the counterparty
8589 // *plus* the fees paid for the HTLC) they don't swap `HTLC_SUCCESS_TX_WEIGHT` for
8590 // `HTLC_TIMEOUT_TX_WEIGHT`, and vice versa.
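//
// Roughly, an offered HTLC is trimmed to dust when
//   amount_sat < dust_limit_sat + feerate_per_kw * htlc_timeout_tx_weight / 1000
// and a received HTLC when
//   amount_sat < dust_limit_sat + feerate_per_kw * htlc_success_tx_weight / 1000
// The candidates below sit one satoshi to either side of these thresholds, so swapping
// the two weights would flip whether each one counts toward the commitment tx fee.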
8591 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 253 });
8592 let secp_ctx = Secp256k1::new();
8593 let seed = [42; 32];
8594 let network = Network::Testnet;
8595 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8597 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8598 let config = UserConfig::default();
8599 let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8601 let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.get_channel_type());
8602 let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.get_channel_type());
8604 // If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped: then this HTLC would be
8605 // counted as dust when it shouldn't be.
8606 let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000;
8607 let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
8608 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8609 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
8611 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
8612 let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000;
8613 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered);
8614 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8615 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
8617 chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
8619 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
8620 let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000;
8621 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
8622 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8623 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
8625 // If swapped: this HTLC would be counted as dust when it shouldn't be.
8626 let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000;
8627 let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered);
8628 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8629 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
8633 fn channel_reestablish_no_updates() {
8634 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8635 let logger = test_utils::TestLogger::new();
8636 let secp_ctx = Secp256k1::new();
8637 let seed = [42; 32];
8638 let network = Network::Testnet;
8639 let best_block = BestBlock::from_network(network);
8640 let chain_hash = ChainHash::using_genesis_block(network);
8641 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8643 // Go through the flow of opening a channel between two nodes.
8645 // Create Node A's channel pointing to Node B's pubkey
8646 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8647 let config = UserConfig::default();
8648 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8650 // Create Node B's channel by receiving Node A's open_channel message
8651 let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
8652 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8653 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8655 // Node B --> Node A: accept channel
8656 let accept_channel_msg = node_b_chan.accept_inbound_channel();
8657 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8659 // Node A --> Node B: funding created
8660 let output_script = node_a_chan.context.get_funding_redeemscript();
8661 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8662 value: 10000000, script_pubkey: output_script.clone(),
8664 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8665 let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8666 let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8668 // Node B --> Node A: funding signed
8669 let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
8670 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
8672 // Now disconnect the two nodes and check that the commitment point in
8673 // Node B's channel_reestablish message is sane.
8674 assert!(node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
8675 let msg = node_b_chan.get_channel_reestablish(&&logger);
8676 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
8677 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
8678 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
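// Only the initial commitment transaction (number 0) has been signed so far, so B next
// expects commitment number 1, has received no revocations (next_revocation_number 0),
// and has no per-commitment secret from A, hence the all-zero value above.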
8680 // Check that the commitment point in Node A's channel_reestablish message
8682 assert!(node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
8683 let msg = node_a_chan.get_channel_reestablish(&&logger);
8684 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
8685 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
8686 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
8690 fn test_configured_holder_max_htlc_value_in_flight() {
8691 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8692 let logger = test_utils::TestLogger::new();
8693 let secp_ctx = Secp256k1::new();
8694 let seed = [42; 32];
8695 let network = Network::Testnet;
8696 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8697 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8698 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8700 let mut config_2_percent = UserConfig::default();
8701 config_2_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 2;
8702 let mut config_99_percent = UserConfig::default();
8703 config_99_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 99;
8704 let mut config_0_percent = UserConfig::default();
8705 config_0_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 0;
8706 let mut config_101_percent = UserConfig::default();
8707 config_101_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 101;
8709 // Test that `OutboundV1Channel::new` creates a channel with the correct value for
8710 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
8711 // which is set to the lower bound + 1 (2%) of the `channel_value`.
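// Informally, the value asserted throughout this test is
//   holder_max_htlc_value_in_flight_msat
//     = channel_value_msat * clamp(max_inbound_htlc_value_in_flight_percent_of_channel, 1, 100) / 100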
8712 let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42, None).unwrap();
8713 let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000;
8714 assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64);
8716 // Test with the upper bound - 1 of valid values (99%).
8717 let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42, None).unwrap();
8718 let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000;
8719 assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);
8721 let chan_1_open_channel_msg = chan_1.get_open_channel(ChainHash::using_genesis_block(network));
8723 // Test that `InboundV1Channel::new` creates a channel with the correct value for
8724 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
8726 // which is set to the lower bound + 1 (2%) of the `channel_value`.
8726 let chan_3 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8727 let chan_3_value_msat = chan_3.context.channel_value_satoshis * 1000;
8728 assert_eq!(chan_3.context.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64);
8730 // Test with the upper bound - 1 of valid values (99%).
8731 let chan_4 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8732 let chan_4_value_msat = chan_4.context.channel_value_satoshis * 1000;
8733 assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64);
8735 // Test that `OutboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
8736 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
8737 let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42, None).unwrap();
8738 let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000;
8739 assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64);
8741 // Test that `OutboundV1Channel::new` uses the upper bound of the configurable percentage values
8742 // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value larger than 100.
8744 let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42, None).unwrap();
8745 let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000;
8746 assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);
8748 // Test that `InboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
8749 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
8750 let chan_7 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8751 let chan_7_value_msat = chan_7.context.channel_value_satoshis * 1000;
8752 assert_eq!(chan_7.context.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64);
8754 // Test that `InboundV1Channel::new` uses the upper bound of the configurable percentage values
8755 // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value larger than 100.
8757 let chan_8 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8758 let chan_8_value_msat = chan_8.context.channel_value_satoshis * 1000;
8759 assert_eq!(chan_8.context.holder_max_htlc_value_in_flight_msat, chan_8_value_msat);
8763 fn test_configured_holder_selected_channel_reserve_satoshis() {
8765 // Test that `OutboundV1Channel::new` and `InboundV1Channel::new` create a channel with the correct
8766 // channel reserves, when `their_channel_reserve_proportional_millionths` is configured.
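// Per `test_self_and_counterparty_channel_reserve` below, each side's expected reserve is
//   max(MIN_THEIR_CHAN_RESERVE_SATOSHIS,
//       channel_value_satoshis * their_channel_reserve_proportional_millionths / 1_000_000)
// and negotiation must fail once the two reserves sum to at least the full channel value.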
8767 test_self_and_counterparty_channel_reserve(10_000_000, 0.02, 0.02);
8769 // Test with valid but unreasonably high channel reserves.
8770 // The requesting and accepting parties have requested 49%-49% and 60%-30% channel reserves.
8771 test_self_and_counterparty_channel_reserve(10_000_000, 0.49, 0.49);
8772 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.30);
8774 // Test with a calculated channel reserve below the lower bound,
8775 // i.e. `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
8776 test_self_and_counterparty_channel_reserve(100_000, 0.00002, 0.30);
8778 // Test with invalid channel reserves, where the sum of both is greater than or equal to the channel value.
8780 test_self_and_counterparty_channel_reserve(10_000_000, 0.50, 0.50);
8781 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.50);
8784 fn test_self_and_counterparty_channel_reserve(channel_value_satoshis: u64, outbound_selected_channel_reserve_perc: f64, inbound_selected_channel_reserve_perc: f64) {
8785 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15_000 });
8786 let logger = test_utils::TestLogger::new();
8787 let secp_ctx = Secp256k1::new();
8788 let seed = [42; 32];
8789 let network = Network::Testnet;
8790 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8791 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8792 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8795 let mut outbound_node_config = UserConfig::default();
8796 outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
8797 let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42, None).unwrap();
8799 let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
8800 assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);
8802 let chan_open_channel_msg = chan.get_open_channel(ChainHash::using_genesis_block(network));
8803 let mut inbound_node_config = UserConfig::default();
8804 inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
8806 if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 {
8807 let chan_inbound_node = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false).unwrap();
8809 let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64);
8811 assert_eq!(chan_inbound_node.context.holder_selected_channel_reserve_satoshis, expected_inbound_selected_chan_reserve);
8812 assert_eq!(chan_inbound_node.context.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve);
8814 // Channel negotiation failed.
8815 let result = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false);
8816 assert!(result.is_err());
8821 fn channel_update() {
8822 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8823 let logger = test_utils::TestLogger::new();
8824 let secp_ctx = Secp256k1::new();
8825 let seed = [42; 32];
8826 let network = Network::Testnet;
8827 let best_block = BestBlock::from_network(network);
8828 let chain_hash = ChainHash::using_genesis_block(network);
8829 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8831 // Create Node A's channel pointing to Node B's pubkey
8832 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8833 let config = UserConfig::default();
8834 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8836 // Create Node B's channel by receiving Node A's open_channel message
8837 // Make sure A's dust limit is as we expect.
8838 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8839 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8840 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8842 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
8843 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
8844 accept_channel_msg.dust_limit_satoshis = 546;
8845 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8846 node_a_chan.context.holder_dust_limit_satoshis = 1560;
8848 // Node A --> Node B: funding created
8849 let output_script = node_a_chan.context.get_funding_redeemscript();
8850 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8851 value: 10000000, script_pubkey: output_script.clone(),
8853 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8854 let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8855 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8857 // Node B --> Node A: funding signed
8858 let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
8859 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
8861 // Make sure that receiving a channel update will update the Channel as expected.
8862 let update = ChannelUpdate {
8863 contents: UnsignedChannelUpdate {
8865 short_channel_id: 0,
8868 cltv_expiry_delta: 100,
8869 htlc_minimum_msat: 5,
8870 htlc_maximum_msat: MAX_VALUE_MSAT,
8872 fee_proportional_millionths: 11,
8873 excess_data: Vec::new(),
8875 signature: Signature::from(unsafe { FFISignature::new() })
8877 assert!(node_a_chan.channel_update(&update).unwrap());
8879 // The counterparty can send an update with a higher minimum HTLC, but that shouldn't
8880 // change our official htlc_minimum_msat.
8881 assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1);
8882 match node_a_chan.context.counterparty_forwarding_info() {
8884 assert_eq!(info.cltv_expiry_delta, 100);
8885 assert_eq!(info.fee_base_msat, 110);
8886 assert_eq!(info.fee_proportional_millionths, 11);
8888 None => panic!("expected counterparty forwarding info to be Some")
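// Re-applying the identical update should be reported as a no-op.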
8891 assert!(!node_a_chan.channel_update(&update).unwrap());
8895 fn blinding_point_skimmed_fee_malformed_ser() {
8896 // Ensure that channel blinding points, skimmed fees, and malformed HTLCs are (de)serialized properly.
8898 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8899 let secp_ctx = Secp256k1::new();
8900 let seed = [42; 32];
8901 let network = Network::Testnet;
8902 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8904 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8905 let config = UserConfig::default();
8906 let features = channelmanager::provided_init_features(&config);
8907 let outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8908 let mut chan = Channel { context: outbound_chan.context };
8910 let dummy_htlc_source = HTLCSource::OutboundRoute {
8912 hops: vec![RouteHop {
8913 pubkey: test_utils::pubkey(2), channel_features: ChannelFeatures::empty(),
8914 node_features: NodeFeatures::empty(), short_channel_id: 0, fee_msat: 0,
8915 cltv_expiry_delta: 0, maybe_announced_channel: false,
8919 session_priv: test_utils::privkey(42),
8920 first_hop_htlc_msat: 0,
8921 payment_id: PaymentId([42; 32]),
8923 let dummy_outbound_output = OutboundHTLCOutput {
8926 payment_hash: PaymentHash([43; 32]),
8928 state: OutboundHTLCState::Committed,
8929 source: dummy_htlc_source.clone(),
8930 skimmed_fee_msat: None,
8931 blinding_point: None,
8933 let mut pending_outbound_htlcs = vec![dummy_outbound_output.clone(); 10];
8934 for (idx, htlc) in pending_outbound_htlcs.iter_mut().enumerate() {
8936 htlc.blinding_point = Some(test_utils::pubkey(42 + idx as u8));
8939 htlc.skimmed_fee_msat = Some(1);
8942 chan.context.pending_outbound_htlcs = pending_outbound_htlcs.clone();
8944 let dummy_holding_cell_add_htlc = HTLCUpdateAwaitingACK::AddHTLC {
8947 payment_hash: PaymentHash([43; 32]),
8948 source: dummy_htlc_source.clone(),
8949 onion_routing_packet: msgs::OnionPacket {
8951 public_key: Ok(test_utils::pubkey(1)),
8952 hop_data: [0; 20*65],
8955 skimmed_fee_msat: None,
8956 blinding_point: None,
8958 let dummy_holding_cell_claim_htlc = HTLCUpdateAwaitingACK::ClaimHTLC {
8959 payment_preimage: PaymentPreimage([42; 32]),
8962 let dummy_holding_cell_failed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailHTLC {
8963 htlc_id, err_packet: msgs::OnionErrorPacket { data: vec![42] }
8965 let dummy_holding_cell_malformed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailMalformedHTLC {
8966 htlc_id, failure_code: INVALID_ONION_BLINDING, sha256_of_onion: [0; 32],
8968 let mut holding_cell_htlc_updates = Vec::with_capacity(12);
8971 holding_cell_htlc_updates.push(dummy_holding_cell_add_htlc.clone());
8972 } else if i % 5 == 1 {
8973 holding_cell_htlc_updates.push(dummy_holding_cell_claim_htlc.clone());
8974 } else if i % 5 == 2 {
8975 let mut dummy_add = dummy_holding_cell_add_htlc.clone();
8976 if let HTLCUpdateAwaitingACK::AddHTLC {
8977 ref mut blinding_point, ref mut skimmed_fee_msat, ..
8978 } = &mut dummy_add {
8979 *blinding_point = Some(test_utils::pubkey(42 + i));
8980 *skimmed_fee_msat = Some(42);
8982 holding_cell_htlc_updates.push(dummy_add);
8983 } else if i % 5 == 3 {
8984 holding_cell_htlc_updates.push(dummy_holding_cell_malformed_htlc(i as u64));
8986 holding_cell_htlc_updates.push(dummy_holding_cell_failed_htlc(i as u64));
8989 chan.context.holding_cell_htlc_updates = holding_cell_htlc_updates.clone();
8991 // Encode and decode the channel and ensure that the HTLCs within are the same.
8992 let encoded_chan = chan.encode();
8993 let mut s = crate::io::Cursor::new(&encoded_chan);
8994 let mut reader = crate::util::ser::FixedLengthReader::new(&mut s, encoded_chan.len() as u64);
8995 let features = channelmanager::provided_channel_type_features(&config);
8996 let decoded_chan = Channel::read(&mut reader, (&&keys_provider, &&keys_provider, 0, &features)).unwrap();
8997 assert_eq!(decoded_chan.context.pending_outbound_htlcs, pending_outbound_htlcs);
8998 assert_eq!(decoded_chan.context.holding_cell_htlc_updates, holding_cell_htlc_updates);
9001 #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
9003 fn outbound_commitment_test() {
9004 use bitcoin::sighash;
9005 use bitcoin::consensus::encode::serialize;
9006 use bitcoin::sighash::EcdsaSighashType;
9007 use bitcoin::hashes::hex::FromHex;
9008 use bitcoin::hash_types::Txid;
9009 use bitcoin::secp256k1::Message;
9010 use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, ecdsa::EcdsaChannelSigner};
9011 use crate::ln::PaymentPreimage;
9012 use crate::ln::channel::{HTLCOutputInCommitment, TxCreationKeys};
9013 use crate::ln::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint};
9014 use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
9015 use crate::util::logger::Logger;
9016 use crate::sync::Arc;
9017 use core::str::FromStr;
9018 use hex::DisplayHex;
9020 // Test vectors from BOLT 3 Appendices C and F (anchors):
9021 let feeest = TestFeeEstimator{fee_est: 15000};
9022 let logger : Arc<dyn Logger> = Arc::new(test_utils::TestLogger::new());
9023 let secp_ctx = Secp256k1::new();
9025 let mut signer = InMemorySigner::new(
9027 SecretKey::from_slice(&<Vec<u8>>::from_hex("30ff4956bbdd3222d44cc5e8a1261dab1e07957bdac5ae88fe3261ef321f3749").unwrap()[..]).unwrap(),
9028 SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
9029 SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
9030 SecretKey::from_slice(&<Vec<u8>>::from_hex("3333333333333333333333333333333333333333333333333333333333333333").unwrap()[..]).unwrap(),
9031 SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
9033 // These aren't set in the test vectors:
9034 [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff],
9040 assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..],
9041 <Vec<u8>>::from_hex("023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb").unwrap()[..]);
9042 let keys_provider = Keys { signer: signer.clone() };
9044 let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9045 let mut config = UserConfig::default();
9046 config.channel_handshake_config.announced_channel = false;
9047 let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42, None).unwrap(); // Nothing uses their network key in this test
9048 chan.context.holder_dust_limit_satoshis = 546;
9049 chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in in accept_channel
9051 let funding_info = OutPoint{ txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
9053 let counterparty_pubkeys = ChannelPublicKeys {
9054 funding_pubkey: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
9055 revocation_basepoint: RevocationBasepoint::from(PublicKey::from_slice(&<Vec<u8>>::from_hex("02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27").unwrap()[..]).unwrap()),
9056 payment_point: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"),
9057 delayed_payment_basepoint: DelayedPaymentBasepoint::from(public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13")),
9058 htlc_basepoint: HtlcBasepoint::from(public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"))
9060 chan.context.channel_transaction_parameters.counterparty_parameters = Some(
9061 CounterpartyChannelTransactionParameters {
9062 pubkeys: counterparty_pubkeys.clone(),
9063 selected_contest_delay: 144
9065 chan.context.channel_transaction_parameters.funding_outpoint = Some(funding_info);
9066 signer.provide_channel_parameters(&chan.context.channel_transaction_parameters);
9068 assert_eq!(counterparty_pubkeys.payment_point.serialize()[..],
9069 <Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
9071 assert_eq!(counterparty_pubkeys.funding_pubkey.serialize()[..],
9072 <Vec<u8>>::from_hex("030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c1").unwrap()[..]);
9074 assert_eq!(counterparty_pubkeys.htlc_basepoint.to_public_key().serialize()[..],
9075 <Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
9077 // We can't just use build_holder_transaction_keys here as the per_commitment_secret is not
9078 // derived from a commitment_seed, so instead we copy it here and call
9079 // build_commitment_transaction.
9080 let delayed_payment_base = &chan.context.holder_signer.as_ref().pubkeys().delayed_payment_basepoint;
9081 let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
9082 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
9083 let htlc_basepoint = &chan.context.holder_signer.as_ref().pubkeys().htlc_basepoint;
9084 let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint);
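// Per BOLT 3, each non-revocation key above is derived as
//   basepoint + SHA256(per_commitment_point || basepoint) * G,
// while the revocation key blinds both the counterparty's revocation basepoint and the
// per-commitment point; `derive_new` computes the full per-commitment key set.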
9086 macro_rules! test_commitment {
9087 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
9088 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::only_static_remote_key();
9089 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::only_static_remote_key(), $($remain)*);
9093 macro_rules! test_commitment_with_anchors {
9094 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
9095 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9096 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), $($remain)*);
9100 macro_rules! test_commitment_common {
9101 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $opt_anchors: expr, {
9102 $( { $htlc_idx: expr, $counterparty_htlc_sig_hex: expr, $htlc_sig_hex: expr, $htlc_tx_hex: expr } ), *
9104 let (commitment_tx, htlcs): (_, Vec<HTLCOutputInCommitment>) = {
9105 let mut commitment_stats = chan.context.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, &logger);
9107 let htlcs = commitment_stats.htlcs_included.drain(..)
9108 .filter_map(|(htlc, _)| if htlc.transaction_output_index.is_some() { Some(htlc) } else { None })
9110 (commitment_stats.tx, htlcs)
9112 let trusted_tx = commitment_tx.trust();
9113 let unsigned_tx = trusted_tx.built_transaction();
9114 let redeemscript = chan.context.get_funding_redeemscript();
9115 let counterparty_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_sig_hex).unwrap()[..]).unwrap();
9116 let sighash = unsigned_tx.get_sighash_all(&redeemscript, chan.context.channel_value_satoshis);
9117 log_trace!(logger, "unsigned_tx = {}", serialize(&unsigned_tx.transaction).as_hex());
9118 assert!(secp_ctx.verify_ecdsa(&sighash, &counterparty_signature, chan.context.counterparty_funding_pubkey()).is_ok(), "verify counterparty commitment sig");
9120 let mut per_htlc: Vec<(HTLCOutputInCommitment, Option<Signature>)> = Vec::new();
9121 per_htlc.clear(); // Don't warn about excess mut for no-HTLC calls
9122 let mut counterparty_htlc_sigs = Vec::new();
9123 counterparty_htlc_sigs.clear(); // Don't warn about excess mut for no-HTLC calls
9125 let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
9126 per_htlc.push((htlcs[$htlc_idx].clone(), Some(remote_signature)));
9127 counterparty_htlc_sigs.push(remote_signature);
9129 assert_eq!(htlcs.len(), per_htlc.len());
9131 let holder_commitment_tx = HolderCommitmentTransaction::new(
9132 commitment_tx.clone(),
9133 counterparty_signature,
9134 counterparty_htlc_sigs,
9135 &chan.context.holder_signer.as_ref().pubkeys().funding_pubkey,
9136 chan.context.counterparty_funding_pubkey()
9138 let holder_sig = signer.sign_holder_commitment(&holder_commitment_tx, &secp_ctx).unwrap();
9139 assert_eq!(Signature::from_der(&<Vec<u8>>::from_hex($sig_hex).unwrap()[..]).unwrap(), holder_sig, "holder_sig");
9141 let funding_redeemscript = chan.context.get_funding_redeemscript();
9142 let tx = holder_commitment_tx.add_holder_sig(&funding_redeemscript, holder_sig);
9143 assert_eq!(serialize(&tx)[..], <Vec<u8>>::from_hex($tx_hex).unwrap()[..], "tx");
9145 // ((htlc, counterparty_sig), (index, holder_sig))
9146 let mut htlc_counterparty_sig_iter = holder_commitment_tx.counterparty_htlc_sigs.iter();
9149 log_trace!(logger, "verifying htlc {}", $htlc_idx);
9150 let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
9152 let ref htlc = htlcs[$htlc_idx];
9153 let mut htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw,
9154 chan.context.get_counterparty_selected_contest_delay().unwrap(),
9155 &htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
9156 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
9157 let htlc_sighashtype = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
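// Anchor-channel HTLC transactions are signed with SIGHASH_SINGLE|SIGHASH_ANYONECANPAY so
// fee-bumping inputs and outputs can be attached at broadcast; otherwise SIGHASH_ALL is used.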
9158 let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
9159 assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key.to_public_key()).is_ok(), "verify counterparty htlc sig");
9161 let mut preimage: Option<PaymentPreimage> = None;
9164 let out = PaymentHash(Sha256::hash(&[i; 32]).to_byte_array());
9165 if out == htlc.payment_hash {
9166 preimage = Some(PaymentPreimage([i; 32]));
9170 assert!(preimage.is_some());
9173 let htlc_counterparty_sig = htlc_counterparty_sig_iter.next().unwrap();
9174 let htlc_holder_sig = signer.sign_holder_htlc_transaction(&htlc_tx, 0, &HTLCDescriptor {
9175 channel_derivation_parameters: ChannelDerivationParameters {
9176 value_satoshis: chan.context.channel_value_satoshis,
9177 keys_id: chan.context.channel_keys_id,
9178 transaction_parameters: chan.context.channel_transaction_parameters.clone(),
9180 commitment_txid: trusted_tx.txid(),
9181 per_commitment_number: trusted_tx.commitment_number(),
9182 per_commitment_point: trusted_tx.per_commitment_point(),
9183 feerate_per_kw: trusted_tx.feerate_per_kw(),
9185 preimage: preimage.clone(),
9186 counterparty_sig: *htlc_counterparty_sig,
9187 }, &secp_ctx).unwrap();
9188 let num_anchors = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { 2 } else { 0 };
9189 assert_eq!(htlc.transaction_output_index, Some($htlc_idx + num_anchors), "output index");
9191 let signature = Signature::from_der(&<Vec<u8>>::from_hex($htlc_sig_hex).unwrap()[..]).unwrap();
9192 assert_eq!(signature, htlc_holder_sig, "htlc sig");
9193 let trusted_tx = holder_commitment_tx.trust();
9194 htlc_tx.input[0].witness = trusted_tx.build_htlc_input_witness($htlc_idx, htlc_counterparty_sig, &htlc_holder_sig, &preimage);
9195 log_trace!(logger, "htlc_tx = {}", serialize(&htlc_tx).as_hex());
9196 assert_eq!(serialize(&htlc_tx)[..], <Vec<u8>>::from_hex($htlc_tx_hex).unwrap()[..], "htlc tx");
9198 assert!(htlc_counterparty_sig_iter.next().is_none());
9202 // anchors: simple commitment tx with no HTLCs and single anchor
9203 test_commitment_with_anchors!("30440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a8658",
9204 "3044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778",
9205 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f10529800000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778014730440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a865801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9207 // simple commitment tx with no HTLCs
9208 chan.context.value_to_self_msat = 7000000000;
9210 test_commitment!("3045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b0",
9211 "30440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae055647142",
9212 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48454a56a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae05564714201483045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9214 // anchors: simple commitment tx with no HTLCs
9215 test_commitment_with_anchors!("3045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f3",
9216 "30450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f7",
9217 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a508b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f701483045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9219 chan.context.pending_inbound_htlcs.push({
9220 let mut out = InboundHTLCOutput{
9222 amount_msat: 1000000,
9224 payment_hash: PaymentHash([0; 32]),
9225 state: InboundHTLCState::Committed,
9227 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap()).to_byte_array();
9230 chan.context.pending_inbound_htlcs.push({
9231 let mut out = InboundHTLCOutput{
9233 amount_msat: 2000000,
9235 payment_hash: PaymentHash([0; 32]),
9236 state: InboundHTLCState::Committed,
9238 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
9241 chan.context.pending_outbound_htlcs.push({
9242 let mut out = OutboundHTLCOutput{
9244 amount_msat: 2000000,
9246 payment_hash: PaymentHash([0; 32]),
9247 state: OutboundHTLCState::Committed,
9248 source: HTLCSource::dummy(),
9249 skimmed_fee_msat: None,
9250 blinding_point: None,
9252 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).to_byte_array();
9255 chan.context.pending_outbound_htlcs.push({
9256 let mut out = OutboundHTLCOutput{
9258 amount_msat: 3000000,
9260 payment_hash: PaymentHash([0; 32]),
9261 state: OutboundHTLCState::Committed,
9262 source: HTLCSource::dummy(),
9263 skimmed_fee_msat: None,
9264 blinding_point: None,
9266 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).to_byte_array();
9269 chan.context.pending_inbound_htlcs.push({
9270 let mut out = InboundHTLCOutput{
9272 amount_msat: 4000000,
9274 payment_hash: PaymentHash([0; 32]),
9275 state: InboundHTLCState::Committed,
9277 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0404040404040404040404040404040404040404040404040404040404040404").unwrap()).to_byte_array();
9281 // commitment tx with all five HTLCs untrimmed (minimum feerate)
9282 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9283 chan.context.feerate_per_kw = 0;
9285 test_commitment!("3044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e5",
9286 "304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea",
9287 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e0a06a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea01473044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9290 "3045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b",
9291 "30440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce",
9292 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b00000000000000000001e8030000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b014730440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
9295 "30440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f896004",
9296 "3045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f",
9297 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b01000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f89600401483045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9300 "30440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d43352",
9301 "3045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa",
9302 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b02000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d4335201483045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9305 "304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c1363",
9306 "304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac7487",
9307 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b03000000000000000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c13630147304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac748701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9310 "3044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b87",
9311 "3045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95",
9312 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b04000000000000000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b8701483045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }

// commitment tx with seven outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 647;
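
// Editorial sanity note on this boundary, per the BOLT 3 trimming rule these vectors
// exercise: an HTLC output is kept only if its value minus the second-stage HTLC tx fee
// stays at or above the dust limit. For the smallest HTLC here (1_000 sat, received,
// success weight 703 as returned by `htlc_success_tx_weight` for non-anchor channels):
// 1_000 - (703 * 647 / 1_000) = 546 sat, exactly the 546 sat dust limit, so it survives.
// At 648 the fee rounds up to 455 sat, leaving 545 < 546, and the output is trimmed --
// hence the six-output vectors that follow.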
test_commitment!("3045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee",
"30450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb7",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e09c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb701483045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"30450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f",
"30440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b",
"020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f014730440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },

{ 1,
"304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c",
"30450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f",
"020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0100000000000000000124060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c014830450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },

{ 2,
"30440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c8",
"3045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673",
"020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe020000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c801483045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },

{ 3,
"304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c64",
"3045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee",
"020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe030000000000000000010c0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c6401483045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

{ 4,
"30450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca",
"3044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9",
"020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe04000000000000000001da0d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca01473044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
} );

// commitment tx with six outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 648;

test_commitment!("304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e5",
"3045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e9",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4844e9d6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e90147304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"3045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e0",
"304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec6",
"020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10000000000000000000123060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e00148304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },

{ 1,
"304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d384124",
"3044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae",
"020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10100000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d38412401473044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },

{ 2,
"304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc625532989",
"3045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e2260796",
"020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf1020000000000000000010b0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc62553298901483045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e226079601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

{ 3,
"3044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be",
"3045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6",
"020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf103000000000000000001d90d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be01483045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
} );

// anchors: commitment tx with six outputs untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 645;
chan.context.holder_dust_limit_satoshis = 1001;
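
// Editorial note: under anchors_zero_htlc_fee_and_dependencies the second-stage HTLC
// transactions carry zero fee, so the trim threshold is just the dust limit. With
// holder_dust_limit_satoshis = 1001 the 1_000 sat HTLC is trimmed while the four larger
// ones survive ("six outputs" counts HTLCs plus to_local/to_remote; the two 330 sat
// anchor outputs come on top of that).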
test_commitment_with_anchors!("3044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc312",
"3045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d0051",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80084a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837ead007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5eb80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994abc996a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d005101473044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc31201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"3045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc282",
"3045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef09",
"02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320002000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc28283483045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef0901008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000" },

{ 1,
"3045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e",
"3045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac",
"02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320003000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e83483045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },

{ 2,
"3044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c1",
"304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e",
"02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320004000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c18347304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },

{ 3,
"3044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc615",
"3045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226",
"02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320005000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc61583483045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
} );

// commitment tx with six outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2069;
chan.context.holder_dust_limit_satoshis = 546;

test_commitment!("304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc",
"3045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e33",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48477956a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e330148304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"3045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f699",
"3045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d",
"02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0000000000000000000175020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f69901483045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },

{ 1,
"3045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df",
"3045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61",
"02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0100000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df01483045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },

{ 2,
"3045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e0",
"3044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c18",
"02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d020000000000000000015d060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e001473044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c1801008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

{ 3,
"304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df",
"3045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33",
"02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d03000000000000000001f2090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df01483045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
} );

// commitment tx with five outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2070;
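
// Editorial note: at 2070 the received 2_000 sat HTLC drops out:
// 2_000 - (703 * 2_070 / 1_000) = 545 < 546, while the offered 2_000 sat HTLC
// (timeout weight 663, fee 1_372 sat) still clears the dust limit. Hence five outputs.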
test_commitment!("304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c",
"3044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c1",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484da966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c10147304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b",
"30440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e5",
"02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff0000000000000000000174020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b014730440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e501008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },

{ 1,
"3045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546",
"30450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c6",
"02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff010000000000000000015c060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546014830450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

{ 2,
"3045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504",
"30440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502",
"02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff02000000000000000001f1090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504014730440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
} );

// commitment tx with five outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2194;

test_commitment!("304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb3",
"3044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d398",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48440966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d3980147304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"3045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de8450",
"304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e0154301",
"02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de84500148304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e015430101008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },

{ 1,
"3044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a0",
"3045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b77",
"02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd010000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a001483045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b7701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

{ 2,
"3045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b",
"30450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3",
"02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd020000000000000000019a090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b014830450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
} );

// commitment tx with four outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2195;
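
// Editorial note: at 2195 the offered 2_000 sat HTLC goes too:
// 2_000 - (663 * 2_195 / 1_000) = 545 < 546. Only the 3_000 and 4_000 sat HTLCs
// remain alongside to_local/to_remote, giving four outputs.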
test_commitment!("304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce403",
"3044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d1767",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484b8976a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d17670147304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce40301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"3045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e",
"3045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d76",
"020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900000000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e01483045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d7601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

{ 1,
"3045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a",
"3044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82",
"020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900100000000000000000199090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a01473044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
} );

// anchors: commitment tx with four outputs untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2185;
chan.context.holder_dust_limit_satoshis = 2001;
let cached_channel_type = chan.context.channel_type;
chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
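
// The remaining vectors alternate between the legacy and anchor channel types, so the
// current type is stashed above and restored from `cached_channel_type` after each
// anchors variant.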
test_commitment_with_anchors!("3044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c0",
"3045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd5",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80064a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994b80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ac5916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd501473044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb",
"30440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f",
"02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc02000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb834730440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },

{ 1,
"3045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd7271",
"3045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688",
"02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc03000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd727183483045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
} );

// commitment tx with four outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 3702;
chan.context.holder_dust_limit_satoshis = 546;
chan.context.channel_type = cached_channel_type.clone();

test_commitment!("304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee40169",
"3045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf75",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4846f916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf750148304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee4016901475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb89",
"304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f",
"020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb890147304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

{ 1,
"3044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb5817",
"304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc",
"020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0100000000000000000176050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb58170147304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
} );

// commitment tx with three outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 3703;

test_commitment!("3045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e",
"304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e1",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484eb936a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e101483045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"3045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a",
"3045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05",
"0200000000010120060e4a29579d429f0f27c17ee5f1ee282f20d706d6f90b63d35946d8f3029a0000000000000000000175050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a01483045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
} );

// anchors: commitment tx with three outputs untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 3687;
chan.context.holder_dust_limit_satoshis = 3001;
chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();

test_commitment_with_anchors!("3045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c228377",
"3045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80054a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aa28b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d01483045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c22837701475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"3044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c",
"3045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de",
"02000000000101542562b326c08e3a076d9cfca2be175041366591da334d8d513ff1686fd95a6002000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c83483045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
} );

// commitment tx with three outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 4914;
chan.context.holder_dust_limit_satoshis = 546;
chan.context.channel_type = cached_channel_type.clone();

test_commitment!("3045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c95244",
"3045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c19",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484ae8f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c1901483045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c9524401475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"3045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374",
"30440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10",
"02000000000101a9172908eace869cc35128c31fc2ab502f72e4dff31aab23e0244c4b04b11ab00000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374014730440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
} );

// commitment tx with two outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 4915;
chan.context.holder_dust_limit_satoshis = 546;

test_commitment!("304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a720",
"30450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf5",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484fa926a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf50147304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a72001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});

// anchors: commitment tx with two outputs untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 4894;
chan.context.holder_dust_limit_satoshis = 4001;
chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();

test_commitment_with_anchors!("3045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b95",
"30450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ad0886a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd01483045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b9501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});

// commitment tx with two outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 9651180;
chan.context.holder_dust_limit_satoshis = 546;
chan.context.channel_type = cached_channel_type.clone();
9619 test_commitment!("304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd3",
9620 "3045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de",
9621 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b800222020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80ec0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4840400483045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de0147304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
		// commitment tx with one output untrimmed (minimum feerate)
		chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
		chan.context.feerate_per_kw = 9651181;

		test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
		                 "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
		                 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
		// anchors: commitment tx with one output untrimmed (minimum dust limit)
		chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
		chan.context.feerate_per_kw = 6216010;
		chan.context.holder_dust_limit_satoshis = 4001;
		chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();

		test_commitment_with_anchors!("30450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf",
		                 "30450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1",
		                 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a04004830450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1014830450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
		// commitment tx with fee greater than funder amount
		chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
		chan.context.feerate_per_kw = 9651936;
		chan.context.holder_dust_limit_satoshis = 546;
		chan.context.channel_type = cached_channel_type;

		test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
		                 "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
		                 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
		// commitment tx with 3 htlc outputs, 2 offered having the same amount and preimage
		chan.context.value_to_self_msat = 7_000_000_000 - 2_000_000;
		chan.context.feerate_per_kw = 253;
		chan.context.pending_inbound_htlcs.clear();
		chan.context.pending_inbound_htlcs.push({
			let mut out = InboundHTLCOutput{
				htlc_id: 1,
				amount_msat: 2000000,
				cltv_expiry: 501,
				payment_hash: PaymentHash([0; 32]),
				state: InboundHTLCState::Committed,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
			out
		});
		chan.context.pending_outbound_htlcs.clear();
		chan.context.pending_outbound_htlcs.push({
			let mut out = OutboundHTLCOutput{
				htlc_id: 6,
				amount_msat: 5000001,
				cltv_expiry: 506,
				payment_hash: PaymentHash([0; 32]),
				state: OutboundHTLCState::Committed,
				source: HTLCSource::dummy(),
				skimmed_fee_msat: None,
				blinding_point: None,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
			out
		});
		chan.context.pending_outbound_htlcs.push({
			let mut out = OutboundHTLCOutput{
				htlc_id: 5,
				amount_msat: 5000000,
				cltv_expiry: 505,
				payment_hash: PaymentHash([0; 32]),
				state: OutboundHTLCState::Committed,
				source: HTLCSource::dummy(),
				skimmed_fee_msat: None,
				blinding_point: None,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
			out
		});
		test_commitment!("304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c8",
		                 "304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c",
		                 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2d8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121b8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121bc0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484a69f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c0147304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

				  { 0,
				  "3045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce5",
				  "3044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b",
				  "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec000000000000000000011f070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce501473044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
				  { 1,
				  "3045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a",
				  "3045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d",
				  "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec01000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a01483045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868f9010000" },
				  { 2,
				  "30440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc",
				  "304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb",
				  "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec02000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc0147304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" }
		} );
		chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
		test_commitment_with_anchors!("3044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c725",
		                 "3045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b76",
		                 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aad9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b7601473044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c72501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

				  { 0,
				  "30440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c2",
				  "304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424",
				  "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c402000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c28347304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
				  { 1,
				  "304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c",
				  "304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b",
				  "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40300000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c8347304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568f9010000" },
				  { 2,
				  "3045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a1",
				  "3045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b",
				  "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40400000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a183483045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568fa010000" }
		} );
	}
	#[test]
	fn test_per_commitment_secret_gen() {
		// Test vectors from BOLT 3 Appendix D:

		let mut seed = [0; 32];
		seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap());
		assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
		           <Vec<u8>>::from_hex("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap()[..]);

		seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap());
		assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
		           <Vec<u8>>::from_hex("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()[..]);

		assert_eq!(chan_utils::build_commitment_secret(&seed, 0xaaaaaaaaaaa),
		           <Vec<u8>>::from_hex("56f4008fb007ca9acf0e15b054d5c9fd12ee06cea347914ddbaed70d1c13a528").unwrap()[..]);

		assert_eq!(chan_utils::build_commitment_secret(&seed, 0x555555555555),
		           <Vec<u8>>::from_hex("9015daaeb06dba4ccc05b91b2f73bd54405f2be9f217fbacd3c5ac2e62327d31").unwrap()[..]);

		seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap());
		assert_eq!(chan_utils::build_commitment_secret(&seed, 1),
		           <Vec<u8>>::from_hex("915c75942a26bb3a433a8ce2cb0427c29ec6c1775cfc78328b57f6ba7bfeaa9c").unwrap()[..]);
	}

	#[test]
	fn test_key_derivation() {
		// Test vectors from BOLT 3 Appendix E:
		let secp_ctx = Secp256k1::new();

		let base_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f").unwrap()[..]).unwrap();
		let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();

		let base_point = PublicKey::from_secret_key(&secp_ctx, &base_secret);
		assert_eq!(base_point.serialize()[..], <Vec<u8>>::from_hex("036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2").unwrap()[..]);

		let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
		assert_eq!(per_commitment_point.serialize()[..], <Vec<u8>>::from_hex("025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486").unwrap()[..]);

		assert_eq!(chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret),
				SecretKey::from_slice(&<Vec<u8>>::from_hex("cbced912d3b21bf196a766651e436aff192362621ce317704ea2f75d87e7be0f").unwrap()[..]).unwrap());

		assert_eq!(RevocationKey::from_basepoint(&secp_ctx, &RevocationBasepoint::from(base_point), &per_commitment_point).to_public_key().serialize()[..],
				<Vec<u8>>::from_hex("02916e326636d19c33f13e8c0c3a03dd157f332f3e99c317c141dd865eb01f8ff0").unwrap()[..]);

		assert_eq!(chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_secret, &base_secret),
				SecretKey::from_slice(&<Vec<u8>>::from_hex("d09ffff62ddb2297ab000cc85bcb4283fdeb6aa052affbc9dddcf33b61078110").unwrap()[..]).unwrap());
	}
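
	// A hedged sketch of the BOLT 3 key tweak checked above: the per-commitment public key
	// is basepoint + SHA256(per_commitment_point || basepoint) * G, and
	// `chan_utils::derive_private_key` applies the same tweak to the secret key. The helper
	// name and shape here are illustrative only, not an LDK API.
	#[cfg(test)]
	fn bolt3_derive_public_key_sketch(
		secp_ctx: &Secp256k1<bitcoin::secp256k1::All>, per_commitment_point: &PublicKey,
		basepoint: &PublicKey,
	) -> PublicKey {
		// tweak = SHA256(per_commitment_point || basepoint)
		let tweak = Sha256::hash(
			&[&per_commitment_point.serialize()[..], &basepoint.serialize()[..]].concat());
		// tweak * G, as a point we can add to the basepoint.
		let tweak_point = PublicKey::from_secret_key(
			secp_ctx, &SecretKey::from_slice(&tweak.to_byte_array()).unwrap());
		basepoint.combine(&tweak_point).expect("basepoint + tweak*G is a valid point")
	}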

	#[test]
	fn test_zero_conf_channel_type_support() {
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
		let logger = test_utils::TestLogger::new();

		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
			node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

		let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
		channel_type_features.set_zero_conf_required();

		let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
		open_channel_msg.channel_type = Some(channel_type_features);
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let res = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
			node_b_node_id, &channelmanager::provided_channel_type_features(&config),
			&channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false);
		assert!(res.is_ok());
	}

	#[test]
	fn test_supports_anchors_zero_htlc_tx_fee() {
		// Tests that if both sides support and negotiate `anchors_zero_fee_htlc_tx`, it is
		// the resulting `channel_type`.
		let secp_ctx = Secp256k1::new();
		let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
		let logger = test_utils::TestLogger::new();

		let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
		let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

		let mut config = UserConfig::default();
		config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;

		// It is not enough for just the initiator to signal `option_anchors_zero_fee_htlc_tx`;
		// both sides need to signal it.
		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42,
			&config, 0, 42, None
		).unwrap();
		assert!(!channel_a.context.channel_type.supports_anchors_zero_fee_htlc_tx());

		let mut expected_channel_type = ChannelTypeFeatures::empty();
		expected_channel_type.set_static_remote_key_required();
		expected_channel_type.set_anchors_zero_fee_htlc_tx_required();

		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
			None
		).unwrap();

		let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
		let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		).unwrap();

		assert_eq!(channel_a.context.channel_type, expected_channel_type);
		assert_eq!(channel_b.context.channel_type, expected_channel_type);
	}

	#[test]
	fn test_rejects_implicit_simple_anchors() {
		// Tests that if `option_anchors` is being negotiated implicitly through the intersection of
		// each side's `InitFeatures`, it is rejected.
		let secp_ctx = Secp256k1::new();
		let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
		let logger = test_utils::TestLogger::new();

		let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
		let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

		let config = UserConfig::default();

		// See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
		let static_remote_key_required: u64 = 1 << 12;
		let simple_anchors_required: u64 = 1 << 20;
		let raw_init_features = static_remote_key_required | simple_anchors_required;
		let init_features_with_simple_anchors = InitFeatures::from_le_bytes(raw_init_features.to_le_bytes().to_vec());
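		// Illustrative sanity check (not part of the original vectors): (1 << 12) | (1 << 20)
		// == 0x0010_1000, so the little-endian bytes handed to `from_le_bytes` are
		// [0x00, 0x10, 0x10, 0x00, ...] -- bit 12 (`option_static_remote_key` required) lands
		// in byte 1 and bit 20 (`option_anchors` required) in byte 2, matching the BOLT 9
		// assignments linked above.
		assert_eq!(raw_init_features.to_le_bytes()[..3], [0x00, 0x10, 0x10]);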

		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
			None
		).unwrap();

		// Set `channel_type` to `None` to force the implicit feature negotiation.
		let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
		open_channel_msg.channel_type = None;

		// Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
		// `static_remote_key`, B will fail the channel.
		let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &init_features_with_simple_anchors,
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		);
		assert!(channel_b.is_err());
	}

	#[test]
	fn test_rejects_simple_anchors_channel_type() {
		// Tests that if `option_anchors` is being negotiated through the `channel_type` feature,
		// it is rejected.
		let secp_ctx = Secp256k1::new();
		let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
		let logger = test_utils::TestLogger::new();

		let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
		let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

		let config = UserConfig::default();

		// See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
		let static_remote_key_required: u64 = 1 << 12;
		let simple_anchors_required: u64 = 1 << 20;
		let simple_anchors_raw_features = static_remote_key_required | simple_anchors_required;
		let simple_anchors_init = InitFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
		let simple_anchors_channel_type = ChannelTypeFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
		assert!(!simple_anchors_init.requires_unknown_bits());
		assert!(!simple_anchors_channel_type.requires_unknown_bits());

		// First, we'll try to open a channel between A and B where A requests a channel type for
		// the original `option_anchors` feature (non zero fee htlc tx). This should be rejected by
		// B as it's not supported by LDK.
		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
			None
		).unwrap();

		let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
		open_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());

		let res = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &simple_anchors_init,
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		);
		assert!(res.is_err());

		// Then, we'll try to open another channel where A requests a channel type for
		// `anchors_zero_fee_htlc_tx`. B is malicious and tries to downgrade the channel type to the
		// original `option_anchors` feature, which should be rejected by A as it's not supported by
		// LDK.
		let mut channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init,
			10000000, 100000, 42, &config, 0, 42, None
		).unwrap();

		let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));

		let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		).unwrap();

		let mut accept_channel_msg = channel_b.get_accept_channel_message();
		accept_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());

		let res = channel_a.accept_channel(
			&accept_channel_msg, &config.channel_handshake_limits, &simple_anchors_init
		);
		assert!(res.is_err());
	}

	#[test]
	fn test_waiting_for_batch() {
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let logger = test_utils::TestLogger::new();
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let best_block = BestBlock::from_network(network);
		let chain_hash = ChainHash::using_genesis_block(network);
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

		let mut config = UserConfig::default();
		// Set trust_own_funding_0conf while ensuring we don't send channel_ready for a
		// channel in a batch before all channels are ready.
		config.channel_handshake_limits.trust_own_funding_0conf = true;

		// Create a channel from node a to node b that will be part of batch funding.
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(
			&feeest,
			&&keys_provider,
			&&keys_provider,
			node_b_node_id,
			&channelmanager::provided_init_features(&config),
			10000000,
			100000,
			42,
			&config,
			0,
			42,
			None
		).unwrap();

		let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(
			&feeest,
			&&keys_provider,
			&&keys_provider,
			node_b_node_id,
			&channelmanager::provided_channel_type_features(&config),
			&channelmanager::provided_init_features(&config),
			&open_channel_msg,
			7,
			&config,
			0,
			&&logger,
			true, // Allow node b to send a 0conf channel_ready.
		).unwrap();

		let accept_channel_msg = node_b_chan.accept_inbound_channel();
		node_a_chan.accept_channel(
			&accept_channel_msg,
			&config.channel_handshake_limits,
			&channelmanager::provided_init_features(&config),
		).unwrap();

		// Fund the channel with a batch funding transaction.
		let output_script = node_a_chan.context.get_funding_redeemscript();
		let tx = Transaction {
			version: 2,
			lock_time: LockTime::ZERO,
			input: Vec::new(),
			output: vec![
				TxOut {
					value: 10000000, script_pubkey: output_script.clone(),
				},
				TxOut {
					value: 10000000, script_pubkey: Builder::new().into_script(),
				},
			]};
		let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
		let funding_created_msg = node_a_chan.get_funding_created(
			tx.clone(), funding_outpoint, true, &&logger,
		).map_err(|_| ()).unwrap();
		let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(
			&funding_created_msg.unwrap(),
			best_block,
			&&keys_provider,
			&&logger,
		).map_err(|_| ()).unwrap();
		let node_b_updates = node_b_chan.monitor_updating_restored(
			&&logger,
			&&keys_provider,
			chain_hash,
			&config,
			0,
		);

		// Receive funding_signed, but the channel will be configured to hold sending channel_ready and
		// broadcasting the funding transaction until the batch is ready.
		let res = node_a_chan.funding_signed(
			&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger,
		);
		let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
		let node_a_updates = node_a_chan.monitor_updating_restored(
			&&logger,
			&&keys_provider,
			chain_hash,
			&config,
			0,
		);
		// Our channel_ready shouldn't have been sent yet, even with trust_own_funding_0conf set,
		// as the funding transaction depends on all channels in the batch becoming ready.
		assert!(node_a_updates.channel_ready.is_none());
		assert!(node_a_updates.funding_broadcastable.is_none());
		assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));

		// It is possible to receive a 0conf channel_ready from the remote node.
		node_a_chan.channel_ready(
			&node_b_updates.channel_ready.unwrap(),
			&&keys_provider,
			chain_hash,
			&config,
			&best_block,
			&&logger,
		).unwrap();
		assert_eq!(
			node_a_chan.context.channel_state,
			ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH | AwaitingChannelReadyFlags::THEIR_CHANNEL_READY)
		);

		// The WAITING_FOR_BATCH flag is only cleared when the ChannelManager tells us the whole
		// batch is ready, via set_batch_ready.
		node_a_chan.set_batch_ready();
		assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY));
		assert!(node_a_chan.check_get_channel_ready(0).is_some());