// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.
use bitcoin::blockdata::constants::ChainHash;
use bitcoin::blockdata::script::{Script, ScriptBuf, Builder};
use bitcoin::blockdata::transaction::Transaction;
use bitcoin::sighash::EcdsaSighashType;
use bitcoin::consensus::encode;

use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::sha256d::Hash as Sha256d;
use bitcoin::hash_types::{Txid, BlockHash};

use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
use bitcoin::secp256k1::{PublicKey, SecretKey};
use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
use bitcoin::secp256k1;
use crate::ln::{ChannelId, PaymentPreimage, PaymentHash};
use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
use crate::ln::msgs;
use crate::ln::msgs::DecodeError;
use crate::ln::script::{self, ShutdownScript};
use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT, ChannelShutdownState};
use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
use crate::ln::chan_utils;
use crate::ln::onion_utils::HTLCFailReason;
use crate::chain::BestBlock;
use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
use crate::chain::transaction::{OutPoint, TransactionData};
use crate::sign::ecdsa::{EcdsaChannelSigner, WriteableEcdsaChannelSigner};
use crate::sign::{EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
use crate::events::ClosureReason;
use crate::routing::gossip::NodeId;
use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
use crate::util::logger::{Logger, Record, WithContext};
use crate::util::errors::APIError;
use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure};
use crate::util::scid_utils::scid_from_parts;

use crate::io;
use crate::prelude::*;
use core::{cmp, mem, fmt};
use core::convert::TryInto;
use core::ops::Deref;
#[cfg(any(test, fuzzing, debug_assertions))]
use crate::sync::Mutex;
use crate::sign::type_resolver::ChannelSignerType;

use super::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint, RevocationBasepoint};
#[cfg(test)]
pub struct ChannelValueStat {
	pub value_to_self_msat: u64,
	pub channel_value_msat: u64,
	pub channel_reserve_msat: u64,
	pub pending_outbound_htlcs_amount_msat: u64,
	pub pending_inbound_htlcs_amount_msat: u64,
	pub holding_cell_outbound_amount_msat: u64,
	pub counterparty_max_htlc_value_in_flight_msat: u64, // outgoing
	pub counterparty_dust_limit_msat: u64,
}
pub struct AvailableBalances {
	/// The amount that would go to us if we close the channel, ignoring any on-chain fees.
	pub balance_msat: u64,
	/// Total amount available for our counterparty to send to us.
	pub inbound_capacity_msat: u64,
	/// Total amount available for us to send to our counterparty.
	pub outbound_capacity_msat: u64,
	/// The maximum value we can assign to the next outbound HTLC.
	pub next_outbound_htlc_limit_msat: u64,
	/// The minimum value we can assign to the next outbound HTLC.
	pub next_outbound_htlc_minimum_msat: u64,
}
#[derive(Debug, Clone, Copy, PartialEq)]
enum FeeUpdateState {
	// Inbound states mirroring InboundHTLCState
	RemoteAnnounced,
	AwaitingRemoteRevokeToAnnounce,
	// Note that we do not have an `AwaitingAnnouncedRemoteRevoke` variant here as it is universally
	// handled the same as `Committed`, with the only exception in `InboundHTLCState` being the
	// distinction of when we allow ourselves to forward the HTLC. Because we aren't "forwarding"
	// the fee update anywhere, we can simply consider the fee update `Committed` immediately
	// instead of setting it to `AwaitingAnnouncedRemoteRevoke`.

	// Outbound state can only be `LocalAnnounced` or `Committed`
	Committed,
	Outbound,
}
enum InboundHTLCRemovalReason {
	FailRelay(msgs::OnionErrorPacket),
	FailMalformed(([u8; 32], u16)),
	Fulfill(PaymentPreimage),
}
enum InboundHTLCState {
	/// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an
	/// update_add_htlc message for this HTLC.
	RemoteAnnounced(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've
	/// revoke_and_ack'd it), but the remote hasn't yet revoked their previous
	/// state (see the example below). We have not yet included this HTLC in a
	/// commitment_signed message because we are waiting on the remote's
	/// aforementioned state revocation. One reason this missing remote RAA
	/// (revoke_and_ack) blocks us from constructing a commitment_signed message
	/// is because every time we create a new "state", i.e. every time we sign a
	/// new commitment tx (see [BOLT #2]), we need a new per_commitment_point,
	/// which are provided one-at-a-time in each RAA. E.g., the last RAA they
	/// sent provided the per_commitment_point for our current commitment tx.
	/// The other reason we should not send a commitment_signed without their RAA
	/// is because their RAA serves to ACK our previous commitment_signed.
	///
	/// Here's an example of how an HTLC could come to be in this state:
	/// remote --> update_add_htlc(prev_htlc) --> local
	/// remote --> commitment_signed(prev_htlc) --> local
	/// remote <-- revoke_and_ack <-- local
	/// remote <-- commitment_signed(prev_htlc) <-- local
	/// [note that here, the remote does not respond with a RAA]
	/// remote --> update_add_htlc(this_htlc) --> local
	/// remote --> commitment_signed(prev_htlc, this_htlc) --> local
	/// Now `this_htlc` will be assigned this state. It's unable to be officially
	/// accepted, i.e. included in a commitment_signed, because we're missing the
	/// RAA that provides our next per_commitment_point. The per_commitment_point
	/// is used to derive commitment keys, which are used to construct the
	/// signatures in a commitment_signed message.
	/// Implies AwaitingRemoteRevoke.
	///
	/// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
	AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've revoke_and_ack'd it).
	/// We have also included this HTLC in our latest commitment_signed and are now just waiting
	/// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the
	/// channel (before it can then get forwarded and/or removed).
	/// Implies AwaitingRemoteRevoke.
	AwaitingAnnouncedRemoteRevoke(PendingHTLCStatus),
	Committed,
	/// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we'll drop it.
	/// Note that we have to keep an eye on the HTLC until we've received a broadcastable
	/// commitment transaction without it as otherwise we'll have to force-close the channel to
	/// claim it before the timeout (obviously doesn't apply to revoked HTLCs that we can't claim
	/// anyway). That said, ChannelMonitor does this for us (see
	/// ChannelMonitor::should_broadcast_holder_commitment_txn) so we actually remove the HTLC from
	/// our own local state before then, once we're sure that the next commitment_signed and
	/// ChannelMonitor::provide_latest_local_commitment_tx will not include this HTLC.
	LocalRemoved(InboundHTLCRemovalReason),
}
struct InboundHTLCOutput {
	htlc_id: u64,
	amount_msat: u64,
	cltv_expiry: u32,
	payment_hash: PaymentHash,
	state: InboundHTLCState,
}
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
enum OutboundHTLCState {
	/// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we will promote to Committed (note that they may not accept it until the next time we
	/// revoke, but we don't really care about that:
	///  * they've revoked, so worst case we can announce an old state and get our (option on)
	///    money back (though we won't), and,
	///  * we'll send them a revoke when they send a commitment_signed, and since only they're
	///    allowed to remove it, the "can only be removed once committed on both sides" requirement
	///    doesn't matter to us and it's up to them to enforce it, worst-case they jump ahead but
	///    we'll never get out of sync).
	/// Note that we Box the OnionPacket as it's rather large and we don't want to blow up
	/// OutboundHTLCOutput's size just for a temporary bit
	LocalAnnounced(Box<msgs::OnionPacket>),
	Committed,
	/// Remote removed this (outbound) HTLC. We're waiting on their commitment_signed to finalize
	/// the change (though they'll need to revoke before we fail the payment).
	RemoteRemoved(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We also have not yet removed this HTLC in a commitment_signed message, and are waiting on a
	/// remote revoke_and_ack on a previous state before we can do so.
	AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We have removed this HTLC in our latest commitment_signed and are now just waiting on a
	/// revoke_and_ack to drop completely.
	AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome),
}
#[derive(Clone)]
#[cfg_attr(test, derive(Debug, PartialEq))]
enum OutboundHTLCOutcome {
	/// LDK version 0.0.105+ will always fill in the preimage here.
	Success(Option<PaymentPreimage>),
	Failure(HTLCFailReason),
}
impl From<Option<HTLCFailReason>> for OutboundHTLCOutcome {
	fn from(o: Option<HTLCFailReason>) -> Self {
		match o {
			None => OutboundHTLCOutcome::Success(None),
			Some(r) => OutboundHTLCOutcome::Failure(r),
		}
	}
}

impl<'a> Into<Option<&'a HTLCFailReason>> for &'a OutboundHTLCOutcome {
	fn into(self) -> Option<&'a HTLCFailReason> {
		match self {
			OutboundHTLCOutcome::Success(_) => None,
			OutboundHTLCOutcome::Failure(ref r) => Some(r),
		}
	}
}
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
struct OutboundHTLCOutput {
	htlc_id: u64,
	amount_msat: u64,
	cltv_expiry: u32,
	payment_hash: PaymentHash,
	state: OutboundHTLCState,
	source: HTLCSource,
	blinding_point: Option<PublicKey>,
	skimmed_fee_msat: Option<u64>,
}
/// See AwaitingRemoteRevoke ChannelState for more info
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
enum HTLCUpdateAwaitingACK {
	AddHTLC { // TODO: Time out if we're getting close to cltv_expiry
		// always outbound
		amount_msat: u64,
		cltv_expiry: u32,
		payment_hash: PaymentHash,
		source: HTLCSource,
		onion_routing_packet: msgs::OnionPacket,
		// The extra fee we're skimming off the top of this HTLC.
		skimmed_fee_msat: Option<u64>,
		blinding_point: Option<PublicKey>,
	},
	ClaimHTLC {
		payment_preimage: PaymentPreimage,
		htlc_id: u64,
	},
	FailHTLC {
		htlc_id: u64,
		err_packet: msgs::OnionErrorPacket,
	},
	FailMalformedHTLC {
		htlc_id: u64,
		failure_code: u16,
		sha256_of_onion: [u8; 32],
	},
}
macro_rules! define_state_flags {
	($flag_type_doc: expr, $flag_type: ident, [$(($flag_doc: expr, $flag: ident, $value: expr)),+], $extra_flags: expr) => {
		#[doc = $flag_type_doc]
		#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
		struct $flag_type(u32);

		impl $flag_type {
			$(
				#[doc = $flag_doc]
				const $flag: $flag_type = $flag_type($value);
			)*

			/// All flags that apply to the specified [`ChannelState`] variant.
			#[allow(unused)]
			const ALL: $flag_type = Self($(Self::$flag.0 | )* $extra_flags);

			#[allow(unused)]
			fn new() -> Self { Self(0) }

			#[allow(unused)]
			fn from_u32(flags: u32) -> Result<Self, ()> {
				if flags & !Self::ALL.0 != 0 {
					Err(())
				} else {
					Ok($flag_type(flags))
				}
			}

			#[allow(unused)]
			fn is_empty(&self) -> bool { self.0 == 0 }
			#[allow(unused)]
			fn is_set(&self, flag: Self) -> bool { *self & flag == flag }
		}

		impl core::ops::Not for $flag_type {
			type Output = Self;
			fn not(self) -> Self::Output { Self(!self.0) }
		}
		impl core::ops::BitOr for $flag_type {
			type Output = Self;
			fn bitor(self, rhs: Self) -> Self::Output { Self(self.0 | rhs.0) }
		}
		impl core::ops::BitOrAssign for $flag_type {
			fn bitor_assign(&mut self, rhs: Self) { self.0 |= rhs.0; }
		}
		impl core::ops::BitAnd for $flag_type {
			type Output = Self;
			fn bitand(self, rhs: Self) -> Self::Output { Self(self.0 & rhs.0) }
		}
		impl core::ops::BitAndAssign for $flag_type {
			fn bitand_assign(&mut self, rhs: Self) { self.0 &= rhs.0; }
		}
	};
	($flag_type_doc: expr, $flag_type: ident, $flags: tt) => {
		define_state_flags!($flag_type_doc, $flag_type, $flags, 0);
	};
	($flag_type_doc: expr, FUNDED_STATE, $flag_type: ident, $flags: tt) => {
		define_state_flags!($flag_type_doc, $flag_type, $flags, FundedStateFlags::ALL.0);
		impl core::ops::BitOr<FundedStateFlags> for $flag_type {
			type Output = Self;
			fn bitor(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 | rhs.0) }
		}
		impl core::ops::BitOrAssign<FundedStateFlags> for $flag_type {
			fn bitor_assign(&mut self, rhs: FundedStateFlags) { self.0 |= rhs.0; }
		}
		impl core::ops::BitAnd<FundedStateFlags> for $flag_type {
			type Output = Self;
			fn bitand(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 & rhs.0) }
		}
		impl core::ops::BitAndAssign<FundedStateFlags> for $flag_type {
			fn bitand_assign(&mut self, rhs: FundedStateFlags) { self.0 &= rhs.0; }
		}
		impl PartialEq<FundedStateFlags> for $flag_type {
			fn eq(&self, other: &FundedStateFlags) -> bool { self.0 == other.0 }
		}
		impl From<FundedStateFlags> for $flag_type {
			fn from(flags: FundedStateFlags) -> Self { Self(flags.0) }
		}
	};
}
/// We declare all the states/flags here together to help determine which bits are still available
/// to choose.
mod state_flags {
	pub const OUR_INIT_SENT: u32 = 1 << 0;
	pub const THEIR_INIT_SENT: u32 = 1 << 1;
	pub const FUNDING_NEGOTIATED: u32 = 1 << 2;
	pub const AWAITING_CHANNEL_READY: u32 = 1 << 3;
	pub const THEIR_CHANNEL_READY: u32 = 1 << 4;
	pub const OUR_CHANNEL_READY: u32 = 1 << 5;
	pub const CHANNEL_READY: u32 = 1 << 6;
	pub const PEER_DISCONNECTED: u32 = 1 << 7;
	pub const MONITOR_UPDATE_IN_PROGRESS: u32 = 1 << 8;
	pub const AWAITING_REMOTE_REVOKE: u32 = 1 << 9;
	pub const REMOTE_SHUTDOWN_SENT: u32 = 1 << 10;
	pub const LOCAL_SHUTDOWN_SENT: u32 = 1 << 11;
	pub const SHUTDOWN_COMPLETE: u32 = 1 << 12;
	pub const WAITING_FOR_BATCH: u32 = 1 << 13;
}
371 "Flags that apply to all [`ChannelState`] variants in which the channel is funded.",
373 ("Indicates the remote side is considered \"disconnected\" and no updates are allowed \
374 until after we've done a `channel_reestablish` dance.", PEER_DISCONNECTED, state_flags::PEER_DISCONNECTED),
375 ("Indicates the user has told us a `ChannelMonitor` update is pending async persistence \
376 somewhere and we should pause sending any outbound messages until they've managed to \
377 complete it.", MONITOR_UPDATE_IN_PROGRESS, state_flags::MONITOR_UPDATE_IN_PROGRESS),
378 ("Indicates we received a `shutdown` message from the remote end. If set, they may not add \
379 any new HTLCs to the channel, and we are expected to respond with our own `shutdown` \
380 message when possible.", REMOTE_SHUTDOWN_SENT, state_flags::REMOTE_SHUTDOWN_SENT),
381 ("Indicates we sent a `shutdown` message. At this point, we may not add any new HTLCs to \
382 the channel.", LOCAL_SHUTDOWN_SENT, state_flags::LOCAL_SHUTDOWN_SENT)
387 "Flags that only apply to [`ChannelState::NegotiatingFunding`].",
388 NegotiatingFundingFlags, [
389 ("Indicates we have (or are prepared to) send our `open_channel`/`accept_channel` message.",
390 OUR_INIT_SENT, state_flags::OUR_INIT_SENT),
391 ("Indicates we have received their `open_channel`/`accept_channel` message.",
392 THEIR_INIT_SENT, state_flags::THEIR_INIT_SENT)
397 "Flags that only apply to [`ChannelState::AwaitingChannelReady`].",
398 FUNDED_STATE, AwaitingChannelReadyFlags, [
399 ("Indicates they sent us a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
400 `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
401 THEIR_CHANNEL_READY, state_flags::THEIR_CHANNEL_READY),
402 ("Indicates we sent them a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
403 `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
404 OUR_CHANNEL_READY, state_flags::OUR_CHANNEL_READY),
405 ("Indicates the channel was funded in a batch and the broadcast of the funding transaction \
406 is being held until all channels in the batch have received `funding_signed` and have \
407 their monitors persisted.", WAITING_FOR_BATCH, state_flags::WAITING_FOR_BATCH)
412 "Flags that only apply to [`ChannelState::ChannelReady`].",
413 FUNDED_STATE, ChannelReadyFlags, [
414 ("Indicates that we have sent a `commitment_signed` but are awaiting the responding \
415 `revoke_and_ack` message. During this period, we can't generate new `commitment_signed` \
416 messages as we'd be unable to determine which HTLCs they included in their `revoke_and_ack` \
417 implicit ACK, so instead we have to hold them away temporarily to be sent later.",
418 AWAITING_REMOTE_REVOKE, state_flags::AWAITING_REMOTE_REVOKE)
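// A test-only illustration (not upstream code) of the API generated by
// `define_state_flags!` above: each flag type is a thin `u32` bitmask wrapper
// supporting bitwise composition and membership queries.
#[cfg(test)]
#[allow(unused)]
fn example_state_flag_ops() {
	let flags = FundedStateFlags::PEER_DISCONNECTED | FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS;
	assert!(flags.is_set(FundedStateFlags::PEER_DISCONNECTED));
	assert!(!FundedStateFlags::new().is_set(FundedStateFlags::LOCAL_SHUTDOWN_SENT));
}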
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
enum ChannelState {
	/// We are negotiating the parameters required for the channel prior to funding it.
	NegotiatingFunding(NegotiatingFundingFlags),
	/// We have sent `funding_created` and are awaiting a `funding_signed` to advance to
	/// `AwaitingChannelReady`. Note that this is nonsense for an inbound channel as we immediately generate
	/// `funding_signed` upon receipt of `funding_created`, so simply skip this state.
	FundingNegotiated,
	/// We've received/sent `funding_created` and `funding_signed` and are thus now waiting on the
	/// funding transaction to confirm.
	AwaitingChannelReady(AwaitingChannelReadyFlags),
	/// Both we and our counterparty consider the funding transaction confirmed and the channel is
	/// now operational.
	ChannelReady(ChannelReadyFlags),
	/// We've successfully negotiated a `closing_signed` dance. At this point, the `ChannelManager`
	/// is about to drop us, but we store this anyway.
	ShutdownComplete,
}
macro_rules! impl_state_flag {
	($get: ident, $set: ident, $clear: ident, $state_flag: expr, [$($state: ident),+]) => {
		#[allow(unused)]
		fn $get(&self) -> bool {
			match self {
				$(
					ChannelState::$state(flags) => flags.is_set($state_flag.into()),
				)*
				_ => false,
			}
		}
		#[allow(unused)]
		fn $set(&mut self) {
			match self {
				$(
					ChannelState::$state(flags) => *flags |= $state_flag,
				)*
				_ => debug_assert!(false, "Attempted to set flag on unexpected ChannelState"),
			}
		}
		#[allow(unused)]
		fn $clear(&mut self) {
			match self {
				$(
					ChannelState::$state(flags) => *flags &= !($state_flag),
				)*
				_ => debug_assert!(false, "Attempted to clear flag on unexpected ChannelState"),
			}
		}
	};
	($get: ident, $set: ident, $clear: ident, $state_flag: expr, FUNDED_STATES) => {
		impl_state_flag!($get, $set, $clear, $state_flag, [AwaitingChannelReady, ChannelReady]);
	};
	($get: ident, $set: ident, $clear: ident, $state_flag: expr, $state: ident) => {
		impl_state_flag!($get, $set, $clear, $state_flag, [$state]);
	};
}
impl ChannelState {
	fn from_u32(state: u32) -> Result<Self, ()> {
		match state {
			state_flags::FUNDING_NEGOTIATED => Ok(ChannelState::FundingNegotiated),
			state_flags::SHUTDOWN_COMPLETE => Ok(ChannelState::ShutdownComplete),
			val => {
				if val & state_flags::AWAITING_CHANNEL_READY == state_flags::AWAITING_CHANNEL_READY {
					AwaitingChannelReadyFlags::from_u32(val & !state_flags::AWAITING_CHANNEL_READY)
						.map(|flags| ChannelState::AwaitingChannelReady(flags))
				} else if val & state_flags::CHANNEL_READY == state_flags::CHANNEL_READY {
					ChannelReadyFlags::from_u32(val & !state_flags::CHANNEL_READY)
						.map(|flags| ChannelState::ChannelReady(flags))
				} else if let Ok(flags) = NegotiatingFundingFlags::from_u32(val) {
					Ok(ChannelState::NegotiatingFunding(flags))
				} else {
					Err(())
				}
			},
		}
	}

	fn to_u32(&self) -> u32 {
		match self {
			ChannelState::NegotiatingFunding(flags) => flags.0,
			ChannelState::FundingNegotiated => state_flags::FUNDING_NEGOTIATED,
			ChannelState::AwaitingChannelReady(flags) => state_flags::AWAITING_CHANNEL_READY | flags.0,
			ChannelState::ChannelReady(flags) => state_flags::CHANNEL_READY | flags.0,
			ChannelState::ShutdownComplete => state_flags::SHUTDOWN_COMPLETE,
		}
	}
	fn is_pre_funded_state(&self) -> bool {
		matches!(self, ChannelState::NegotiatingFunding(_)|ChannelState::FundingNegotiated)
	}

	fn is_both_sides_shutdown(&self) -> bool {
		self.is_local_shutdown_sent() && self.is_remote_shutdown_sent()
	}

	fn with_funded_state_flags_mask(&self) -> FundedStateFlags {
		match self {
			ChannelState::AwaitingChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
			ChannelState::ChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
			_ => FundedStateFlags::new(),
		}
	}

	fn should_force_holding_cell(&self) -> bool {
		match self {
			ChannelState::ChannelReady(flags) =>
				flags.is_set(ChannelReadyFlags::AWAITING_REMOTE_REVOKE) ||
					flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into()) ||
					flags.is_set(FundedStateFlags::PEER_DISCONNECTED.into()),
			_ => {
				debug_assert!(false, "The holding cell is only valid within ChannelReady");
				false
			},
		}
	}

	impl_state_flag!(is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected,
		FundedStateFlags::PEER_DISCONNECTED, FUNDED_STATES);
	impl_state_flag!(is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress,
		FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS, FUNDED_STATES);
	impl_state_flag!(is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent,
		FundedStateFlags::LOCAL_SHUTDOWN_SENT, FUNDED_STATES);
	impl_state_flag!(is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent,
		FundedStateFlags::REMOTE_SHUTDOWN_SENT, FUNDED_STATES);
	impl_state_flag!(is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready,
		AwaitingChannelReadyFlags::OUR_CHANNEL_READY, AwaitingChannelReady);
	impl_state_flag!(is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready,
		AwaitingChannelReadyFlags::THEIR_CHANNEL_READY, AwaitingChannelReady);
	impl_state_flag!(is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch,
		AwaitingChannelReadyFlags::WAITING_FOR_BATCH, AwaitingChannelReady);
	impl_state_flag!(is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke,
		ChannelReadyFlags::AWAITING_REMOTE_REVOKE, ChannelReady);
}
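// A test-only sketch of the round-trip property `to_u32`/`from_u32` are meant
// to preserve: packing a state into its serialized bit representation and
// parsing it back yields the original state.
#[cfg(test)]
#[allow(unused)]
fn example_channel_state_roundtrip() {
	let state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::OUR_CHANNEL_READY);
	assert_eq!(ChannelState::from_u32(state.to_u32()), Ok(state));
}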
pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;

pub const DEFAULT_MAX_HTLCS: u16 = 50;
pub(crate) fn commitment_tx_base_weight(channel_type_features: &ChannelTypeFeatures) -> u64 {
	const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
	const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124;
	if channel_type_features.supports_anchors_zero_fee_htlc_tx() { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT }
}

#[cfg(not(test))]
const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
#[cfg(test)]
pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
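// A minimal sketch (helper name assumed, not upstream) showing how the two
// constants above combine: a commitment transaction's weight is the
// channel-type-dependent base weight plus a fixed increment per non-dust HTLC.
#[cfg(test)]
#[allow(unused)]
fn example_commitment_tx_weight(channel_type_features: &ChannelTypeFeatures, num_nondust_htlcs: u64) -> u64 {
	commitment_tx_base_weight(channel_type_features) + num_nondust_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC
}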
pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;

/// The percentage of the channel value `holder_max_htlc_value_in_flight_msat` used to be set to,
/// before this was made configurable. The percentage was made configurable in LDK 0.0.107,
/// although LDK 0.0.104+ enabled serialization of channels with a different value set for
/// `holder_max_htlc_value_in_flight_msat`.
pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10;
/// Maximum `funding_satoshis` value according to the BOLT #2 specification, if
/// `option_support_large_channel` (aka wumbo channels) is not supported.
/// It's 2^24 - 1.
pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;

/// Total bitcoin supply in satoshis.
pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 1_0000_0000;
/// The maximum network dust limit for standard script formats. This currently represents the
/// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire
/// transaction non-standard and thus refuses to relay it.
/// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many
/// implementations use this value for their dust limit today.
pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546;

/// The maximum channel dust limit we will accept from our counterparty.
pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS;
/// The dust limit is used for both the commitment transaction outputs as well as the closing
/// transactions. For cooperative closing transactions, we require segwit outputs, though accept
/// *any* segwit scripts, which are allowed to be up to 42 bytes in length.
/// In order to avoid having to concern ourselves with standardness during the closing process, we
/// simply require our counterparty to use a dust limit which will leave any segwit output
/// standard.
/// See <https://github.com/lightning/bolts/issues/905> for more details.
pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;
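// A test-only sketch of the bound these constants impose during handshake
// validation: a peer's `dust_limit_satoshis` must fall within
// [`MIN_CHAN_DUST_LIMIT_SATOSHIS`, `MAX_CHAN_DUST_LIMIT_SATOSHIS`].
#[cfg(test)]
#[allow(unused)]
fn example_counterparty_dust_limit_acceptable(dust_limit_satoshis: u64) -> bool {
	dust_limit_satoshis >= MIN_CHAN_DUST_LIMIT_SATOSHIS &&
		dust_limit_satoshis <= MAX_CHAN_DUST_LIMIT_SATOSHIS
}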
// Just a reasonable implementation-specific safe lower bound, higher than the dust limit.
pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;
/// Used to return a simple Error back to ChannelManager. Will get converted to a
/// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
/// channel_id in ChannelManager.
pub(super) enum ChannelError {
	Ignore(String),
	Warn(String),
	Close(String),
}

impl fmt::Debug for ChannelError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match self {
			&ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
			&ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
			&ChannelError::Close(ref e) => write!(f, "Close : {}", e),
		}
	}
}

impl fmt::Display for ChannelError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match self {
			&ChannelError::Ignore(ref e) => write!(f, "{}", e),
			&ChannelError::Warn(ref e) => write!(f, "{}", e),
			&ChannelError::Close(ref e) => write!(f, "{}", e),
		}
	}
}
pub(super) struct WithChannelContext<'a, L: Deref> where L::Target: Logger {
	pub logger: &'a L,
	pub peer_id: Option<PublicKey>,
	pub channel_id: Option<ChannelId>,
}

impl<'a, L: Deref> Logger for WithChannelContext<'a, L> where L::Target: Logger {
	fn log(&self, mut record: Record) {
		record.peer_id = self.peer_id;
		record.channel_id = self.channel_id;
		self.logger.log(record)
	}
}

impl<'a, 'b, L: Deref> WithChannelContext<'a, L>
where L::Target: Logger {
	pub(super) fn from<S: Deref>(logger: &'a L, context: &'b ChannelContext<S>) -> Self
	where S::Target: SignerProvider
	{
		WithChannelContext {
			logger,
			peer_id: Some(context.counterparty_node_id),
			channel_id: Some(context.channel_id),
		}
	}
}
macro_rules! secp_check {
	($res: expr, $err: expr) => {
		match $res {
			Ok(thing) => thing,
			Err(_) => return Err(ChannelError::Close($err)),
		}
	};
}
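// A usage sketch (hypothetical caller, test-only): `secp_check!` unwraps a
// secp256k1 `Result`, converting any error into an early return with
// `ChannelError::Close` carrying the provided message.
#[cfg(test)]
#[allow(unused)]
fn example_secp_check(sig_bytes: &[u8]) -> Result<Signature, ChannelError> {
	let sig = secp_check!(Signature::from_compact(sig_bytes), "Invalid signature".to_owned());
	Ok(sig)
}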
675 /// The "channel disabled" bit in channel_update must be set based on whether we are connected to
676 /// our counterparty or not. However, we don't want to announce updates right away to avoid
677 /// spamming the network with updates if the connection is flapping. Instead, we "stage" updates to
678 /// our channel_update message and track the current state here.
679 /// See implementation at [`super::channelmanager::ChannelManager::timer_tick_occurred`].
680 #[derive(Clone, Copy, PartialEq)]
681 pub(super) enum ChannelUpdateStatus {
682 /// We've announced the channel as enabled and are connected to our peer.
684 /// Our channel is no longer live, but we haven't announced the channel as disabled yet.
686 /// Our channel is live again, but we haven't announced the channel as enabled yet.
688 /// We've announced the channel as disabled.
/// We track when we sent an `AnnouncementSignatures` to our peer in a few states, described here.
#[derive(Debug, PartialEq)]
pub enum AnnouncementSigsState {
	/// We have not sent our peer an `AnnouncementSignatures` yet, or our peer disconnected since
	/// we sent the last `AnnouncementSignatures`.
	NotSent,
	/// We sent an `AnnouncementSignatures` to our peer since the last time our peer disconnected.
	/// This state never appears on disk - instead we write `NotSent`.
	MessageSent,
	/// We sent a `CommitmentSigned` after the last `AnnouncementSignatures` we sent. Because we
	/// only ever have a single `CommitmentSigned` pending at once, if we sent one after sending
	/// `AnnouncementSignatures` then we know the peer received our `AnnouncementSignatures` if
	/// they send back a `RevokeAndACK`.
	/// This state never appears on disk - instead we write `NotSent`.
	Committed,
	/// We received a `RevokeAndACK`, effectively ack-ing our `AnnouncementSignatures`, at this
	/// point we no longer need to re-send our `AnnouncementSignatures` again on reconnect.
	PeerReceived,
}

/// An enum indicating whether the local or remote side offered a given HTLC.
enum HTLCInitiator {
	LocalOffered,
	RemoteOffered,
}
/// A struct gathering stats on pending HTLCs, either inbound or outbound side.
struct HTLCStats {
	pending_htlcs: u32,
	pending_htlcs_value_msat: u64,
	on_counterparty_tx_dust_exposure_msat: u64,
	on_holder_tx_dust_exposure_msat: u64,
	holding_cell_msat: u64,
	on_holder_tx_holding_cell_htlcs_count: u32, // dust HTLCs *non*-included
}
/// A struct gathering stats on a commitment transaction, either local or remote.
struct CommitmentStats<'a> {
	tx: CommitmentTransaction, // the transaction info
	feerate_per_kw: u32, // the feerate included to build the transaction
	total_fee_sat: u64, // the total fee included in the transaction
	num_nondust_htlcs: usize, // the number of HTLC outputs (dust HTLCs *non*-included)
	htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction
	local_balance_msat: u64, // local balance before fees *not* considering dust limits
	remote_balance_msat: u64, // remote balance before fees *not* considering dust limits
	outbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
	inbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful received HTLCs since last commitment
}
/// Used when calculating whether we or the remote can afford an additional HTLC.
struct HTLCCandidate {
	amount_msat: u64,
	origin: HTLCInitiator,
}

impl HTLCCandidate {
	fn new(amount_msat: u64, origin: HTLCInitiator) -> Self {
		Self {
			amount_msat,
			origin,
		}
	}
}
/// A return value enum for get_update_fulfill_htlc. See UpdateFulfillCommitFetch variants for
/// description
enum UpdateFulfillFetch {
	NewClaim {
		monitor_update: ChannelMonitorUpdate,
		htlc_value_msat: u64,
		msg: Option<msgs::UpdateFulfillHTLC>,
	},
	DuplicateClaim {},
}
/// The return type of get_update_fulfill_htlc_and_commit.
pub enum UpdateFulfillCommitFetch {
	/// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
	/// it in the holding cell, or re-generated the update_fulfill message after the same claim was
	/// previously placed in the holding cell (and has since been removed).
	NewClaim {
		/// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
		monitor_update: ChannelMonitorUpdate,
		/// The value of the HTLC which was claimed, in msat.
		htlc_value_msat: u64,
	},
	/// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
	/// or has been forgotten (presumably previously claimed).
	DuplicateClaim {},
}
/// The return value of `monitor_updating_restored`
pub(super) struct MonitorRestoreUpdates {
	pub raa: Option<msgs::RevokeAndACK>,
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub order: RAACommitmentOrder,
	pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
	pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
	pub finalized_claimed_htlcs: Vec<HTLCSource>,
	pub funding_broadcastable: Option<Transaction>,
	pub channel_ready: Option<msgs::ChannelReady>,
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
}
/// The return value of `signer_maybe_unblocked`
#[allow(unused)]
pub(super) struct SignerResumeUpdates {
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub funding_signed: Option<msgs::FundingSigned>,
	pub channel_ready: Option<msgs::ChannelReady>,
}
/// The return value of `channel_reestablish`
pub(super) struct ReestablishResponses {
	pub channel_ready: Option<msgs::ChannelReady>,
	pub raa: Option<msgs::RevokeAndACK>,
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub order: RAACommitmentOrder,
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
	pub shutdown_msg: Option<msgs::Shutdown>,
}
/// The result of a shutdown that should be handled.
#[must_use]
pub(crate) struct ShutdownResult {
	/// A channel monitor update to apply.
	pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
	/// A list of dropped outbound HTLCs that can safely be failed backwards immediately.
	pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
	/// An unbroadcasted batch funding transaction id. The closure of this channel should be
	/// propagated to the remainder of the batch.
	pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
	pub(crate) channel_id: ChannelId,
	pub(crate) counterparty_node_id: PublicKey,
}
/// If the majority of the channel's funds are to the fundee and the initiator holds only just
/// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
/// initiator controls the feerate, if they then go to increase the channel fee, they may have no
/// balance but the fundee is unable to send a payment as the increase in fee more than drains
/// their reserve value. Thus, neither side can send a new HTLC and the channel becomes useless.
/// Thus, before sending an HTLC when we are the initiator, we check that the feerate can increase
/// by this multiple without hitting this case.
/// This multiple is effectively the maximum feerate "jump" we expect until more HTLCs flow over
/// the channel. Sadly, there isn't really a good number for this - if we expect to have no new
/// HTLCs for days we may need this to suffice for feerate increases across days, but that may
/// leave the channel less usable as we hold a bigger reserve.
#[cfg(any(fuzzing, test))]
pub const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
#[cfg(not(any(fuzzing, test)))]
const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
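// A test-only sketch (names assumed) of the buffered check described above:
// compute the commitment fee at a feerate inflated by the buffer multiple and
// ask whether the initiator's balance could still cover it.
#[cfg(test)]
#[allow(unused)]
fn example_can_afford_fee_spike(
	channel_type_features: &ChannelTypeFeatures, feerate_per_kw: u64, num_nondust_htlcs: u64,
	initiator_balance_sat: u64,
) -> bool {
	let weight = commitment_tx_base_weight(channel_type_features)
		+ num_nondust_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC;
	let buffered_fee_sat = feerate_per_kw * FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * weight / 1000;
	initiator_balance_sat >= buffered_fee_sat
}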
/// If we fail to see a funding transaction confirmed on-chain within this many blocks after the
/// channel creation on an inbound channel, we simply force-close and move on.
/// This constant is the one suggested in BOLT 2.
pub(crate) const FUNDING_CONF_DEADLINE_BLOCKS: u32 = 2016;
/// In case of a concurrent update_add_htlc proposed by our counterparty, we might
/// not have enough balance value remaining to cover the onchain cost of this new
/// HTLC weight. If this happens, our counterparty fails the reception of our
/// commitment_signed including this new HTLC due to infringement on the channel
/// reserve.
/// To prevent this case, we compute our outbound update_fee with an HTLC buffer of
/// size 2. However, if the number of concurrent update_add_htlc is higher, this still
/// leads to a channel force-close. Ultimately, this is an issue coming from the
/// design of LN state machines, allowing asynchronous updates.
pub(crate) const CONCURRENT_INBOUND_HTLC_FEE_BUFFER: u32 = 2;
/// When a channel is opened, we check that the funding amount is enough to pay for relevant
/// commitment transaction fees, with at least this many HTLCs present on the commitment
/// transaction (not counting the value of the HTLCs themselves).
pub(crate) const MIN_AFFORDABLE_HTLC_COUNT: usize = 4;
/// When a [`Channel`] has its [`ChannelConfig`] updated, its existing one is stashed for up to this
/// number of ticks to allow forwarding HTLCs by nodes that have yet to receive the new
/// ChannelUpdate prompted by the config update. This value was determined as follows:
///
///  * The expected interval between ticks (1 minute).
///  * The average convergence delay of updates across the network, i.e., ~300 seconds on average
///    for a node to see an update as seen on `<https://arxiv.org/pdf/2205.12737.pdf>`.
///  * `EXPIRE_PREV_CONFIG_TICKS` = convergence_delay / tick_interval
pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;
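// A test-only check mirroring the derivation above: with one-minute ticks and
// ~300 seconds of expected network-wide convergence, the previous config is
// kept for 300 / 60 = 5 ticks.
#[cfg(test)]
#[allow(unused)]
fn example_expire_prev_config_derivation() {
	assert_eq!(EXPIRE_PREV_CONFIG_TICKS, 300 / 60);
}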
/// The number of ticks that may elapse while we're waiting for a response to a
/// [`msgs::RevokeAndACK`] or [`msgs::ChannelReestablish`] message before we attempt to disconnect
/// them.
///
/// See [`ChannelContext::sent_message_awaiting_response`] for more information.
pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;
/// The number of ticks that may elapse while we're waiting for an unfunded outbound/inbound channel
/// to be promoted to a [`Channel`] since the unfunded channel was created. An unfunded channel
/// exceeding this age limit will be force-closed and purged from memory.
pub(crate) const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;

/// Number of blocks needed for an output from a coinbase transaction to be spendable.
pub(crate) const COINBASE_MATURITY: u32 = 100;
struct PendingChannelMonitorUpdate {
	update: ChannelMonitorUpdate,
}

impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
	(0, update, required),
});
/// The `ChannelPhase` enum describes the current phase in life of a lightning channel with each of
/// its variants containing an appropriate channel struct.
pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
	UnfundedOutboundV1(OutboundV1Channel<SP>),
	UnfundedInboundV1(InboundV1Channel<SP>),
	Funded(Channel<SP>),
}
impl<'a, SP: Deref> ChannelPhase<SP> where
	SP::Target: SignerProvider,
	<SP::Target as SignerProvider>::EcdsaSigner: ChannelSigner,
{
	pub fn context(&'a self) -> &'a ChannelContext<SP> {
		match self {
			ChannelPhase::Funded(chan) => &chan.context,
			ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
			ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
		}
	}

	pub fn context_mut(&'a mut self) -> &'a mut ChannelContext<SP> {
		match self {
			ChannelPhase::Funded(ref mut chan) => &mut chan.context,
			ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
			ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
		}
	}
}
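// A minimal sketch (test-only, names assumed) of how callers can dispatch on a
// `ChannelPhase` while sharing logic through the common `context` accessor.
#[cfg(test)]
#[allow(unused)]
fn example_phase_channel_value<SP: Deref>(phase: &ChannelPhase<SP>) -> u64
where
	SP::Target: SignerProvider,
	<SP::Target as SignerProvider>::EcdsaSigner: ChannelSigner,
{
	phase.context().channel_value_satoshis
}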
/// Contains all state common to unfunded inbound/outbound channels.
pub(super) struct UnfundedChannelContext {
	/// A counter tracking how many ticks have elapsed since this unfunded channel was created.
	/// If the peer has yet to respond by the time this counter reaches
	/// `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS`, the channel will be force-closed and purged from memory.
	///
	/// This is so that we don't keep channels around that haven't progressed to a funded state
	/// in a timely manner.
	unfunded_channel_age_ticks: usize,
}

impl UnfundedChannelContext {
	/// Determines whether we should force-close and purge this unfunded channel from memory due to it
	/// having reached the unfunded channel age limit.
	///
	/// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
	pub fn should_expire_unfunded_channel(&mut self) -> bool {
		self.unfunded_channel_age_ticks += 1;
		self.unfunded_channel_age_ticks >= UNFUNDED_CHANNEL_AGE_LIMIT_TICKS
	}
}
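// A usage sketch (test-only) of the expiry logic above: the timer tick bumps
// the age counter once per call and signals expiry once the limit is reached.
#[cfg(test)]
#[allow(unused)]
fn example_unfunded_channel_expiry() {
	let mut ctx = UnfundedChannelContext { unfunded_channel_age_ticks: 0 };
	for _ in 0..(UNFUNDED_CHANNEL_AGE_LIMIT_TICKS - 1) {
		assert!(!ctx.should_expire_unfunded_channel());
	}
	assert!(ctx.should_expire_unfunded_channel());
}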
/// Contains everything about the channel including state, and various flags.
pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
	config: LegacyChannelConfig,

	// Track the previous `ChannelConfig` so that we can continue forwarding HTLCs that were
	// constructed using it. The second element in the tuple corresponds to the number of ticks that
	// have elapsed since the update occurred.
	prev_config: Option<(ChannelConfig, usize)>,

	inbound_handshake_limits_override: Option<ChannelHandshakeLimits>,

	user_id: u128,

	/// The current channel ID.
	channel_id: ChannelId,
	/// The temporary channel ID used during channel setup. Value kept even after transitioning to a final channel ID.
	/// Will be `None` for channels created prior to 0.0.115.
	temporary_channel_id: Option<ChannelId>,
	channel_state: ChannelState,

	// When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
	// our peer. However, we want to make sure they received it, or else rebroadcast it when we
	// next connect.
	// We do so here, see `AnnouncementSigsSent` for more details on the state(s).
	// Note that a number of our tests were written prior to the behavior here which retransmits
	// AnnouncementSignatures until after an RAA completes, so the behavior is short-circuited in
	// tests.
	#[cfg(any(test, feature = "_test_utils"))]
	pub(crate) announcement_sigs_state: AnnouncementSigsState,
	#[cfg(not(any(test, feature = "_test_utils")))]
	announcement_sigs_state: AnnouncementSigsState,
	secp_ctx: Secp256k1<secp256k1::All>,
	channel_value_satoshis: u64,

	latest_monitor_update_id: u64,

	holder_signer: ChannelSignerType<SP>,
	shutdown_scriptpubkey: Option<ShutdownScript>,
	destination_script: ScriptBuf,

	// Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction
	// generation start at 0 and count up...this simplifies some parts of implementation at the
	// cost of others, but should really just be changed.
	//
	cur_holder_commitment_transaction_number: u64,
	cur_counterparty_commitment_transaction_number: u64,
	value_to_self_msat: u64, // Excluding all pending_htlcs, fees, and anchor outputs
	pending_inbound_htlcs: Vec<InboundHTLCOutput>,
	pending_outbound_htlcs: Vec<OutboundHTLCOutput>,
	holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,
	/// When resending CS/RAA messages on channel monitor restoration or on reconnect, we always
	/// need to ensure we resend them in the order we originally generated them. Note that because
	/// there can only ever be one in-flight CS and/or one in-flight RAA at any time, it is
	/// sufficient to simply set this to the opposite of any message we are generating as we
	/// generate it. ie when we generate a CS, we set this to RAAFirst as, if there is a pending
	/// in-flight RAA to resend, it will have been the first thing we generated, and thus we should
	/// send it first.
	resend_order: RAACommitmentOrder,

	monitor_pending_channel_ready: bool,
	monitor_pending_revoke_and_ack: bool,
	monitor_pending_commitment_signed: bool,
	// TODO: If a channel is drop'd, we don't know whether the `ChannelMonitor` is ultimately
	// responsible for some of the HTLCs here or not - we don't know whether the update in question
	// completed or not. We currently ignore these fields entirely when force-closing a channel,
	// but need to handle this somehow or we run the risk of losing HTLCs!
	monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
	monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
	monitor_pending_finalized_fulfills: Vec<HTLCSource>,
	/// If we went to send a commitment update (ie some messages then [`msgs::CommitmentSigned`])
	/// but our signer (initially) refused to give us a signature, we should retry at some point in
	/// the future when the signer indicates it may have a signature for us.
	///
	/// This flag is set in such a case. Note that we don't need to persist this as we'll end up
	/// setting it again as a side-effect of [`Channel::channel_reestablish`].
	signer_pending_commitment_update: bool,
	/// Similar to [`Self::signer_pending_commitment_update`] but we're waiting to send either a
	/// [`msgs::FundingCreated`] or [`msgs::FundingSigned`] depending on if this channel is
	/// outbound or inbound.
	signer_pending_funding: bool,
	// pending_update_fee is filled when sending and receiving update_fee.
	//
	// Because it follows the same commitment flow as HTLCs, `FeeUpdateState` is either `Outbound`
	// or matches a subset of the `InboundHTLCState` variants. It is then updated/used when
	// generating new commitment transactions with exactly the same criteria as inbound/outbound
	// HTLCs with similar state.
	pending_update_fee: Option<(u32, FeeUpdateState)>,
	// If a `send_update_fee()` call is made with ChannelState::AwaitingRemoteRevoke set, we place
	// it here instead of `pending_update_fee` in the same way as we place outbound HTLC updates in
	// `holding_cell_htlc_updates` instead of `pending_outbound_htlcs`. It is released into
	// `pending_update_fee` with the same criteria as outbound HTLC updates but can be updated by
	// further `send_update_fee` calls, dropping the previous holding cell update entirely.
	holding_cell_update_fee: Option<u32>,
	next_holder_htlc_id: u64,
	next_counterparty_htlc_id: u64,
	feerate_per_kw: u32,
	/// The timestamp set on our latest `channel_update` message for this channel. It is updated
	/// when the channel is updated in ways which may impact the `channel_update` message or when a
	/// new block is received, ensuring it's always at least moderately close to the current real
	/// time.
	update_time_counter: u32,

	#[cfg(debug_assertions)]
	/// Max to_local and to_remote outputs in a locally-generated commitment transaction
	holder_max_commitment_tx_output: Mutex<(u64, u64)>,
	#[cfg(debug_assertions)]
	/// Max to_local and to_remote outputs in a remote-generated commitment transaction
	counterparty_max_commitment_tx_output: Mutex<(u64, u64)>,
	last_sent_closing_fee: Option<(u64, Signature)>, // (fee, holder_sig)
	target_closing_feerate_sats_per_kw: Option<u32>,

	/// If our counterparty sent us a closing_signed while we were waiting for a `ChannelMonitor`
	/// update, we need to delay processing it until later. We do that here by simply storing the
	/// closing_signed message and handling it in `maybe_propose_closing_signed`.
	pending_counterparty_closing_signed: Option<msgs::ClosingSigned>,

	/// The minimum and maximum absolute fee, in satoshis, we are willing to place on the closing
	/// transaction. These are set once we reach `closing_negotiation_ready`.
	#[cfg(test)]
	pub(crate) closing_fee_limits: Option<(u64, u64)>,
	#[cfg(not(test))]
	closing_fee_limits: Option<(u64, u64)>,
	/// If we remove an HTLC (or fee update), commit, and receive our counterparty's
	/// `revoke_and_ack`, we remove all knowledge of said HTLC (or fee update). However, the latest
	/// local commitment transaction that we can broadcast still contains the HTLC (or old fee)
	/// until we receive a further `commitment_signed`. Thus we are not eligible for initiating the
	/// `closing_signed` negotiation if we're expecting a counterparty `commitment_signed`.
	///
	/// To ensure we don't send a `closing_signed` too early, we track this state here, waiting
	/// until we see a `commitment_signed` before doing so.
	///
	/// We don't bother to persist this - we anticipate this state won't last longer than a few
	/// milliseconds, so any accidental force-closes here should be exceedingly rare.
	expecting_peer_commitment_signed: bool,
	/// The hash of the block in which the funding transaction was included.
	funding_tx_confirmed_in: Option<BlockHash>,
	funding_tx_confirmation_height: u32,
	short_channel_id: Option<u64>,
	/// Either the height at which this channel was created or the height at which it was last
	/// serialized if it was serialized by versions prior to 0.0.103.
	/// We use this to close if funding is never broadcasted.
	channel_creation_height: u32,
	counterparty_dust_limit_satoshis: u64,

	#[cfg(test)]
	pub(super) holder_dust_limit_satoshis: u64,
	#[cfg(not(test))]
	holder_dust_limit_satoshis: u64,

	#[cfg(test)]
	pub(super) counterparty_max_htlc_value_in_flight_msat: u64,
	#[cfg(not(test))]
	counterparty_max_htlc_value_in_flight_msat: u64,

	#[cfg(test)]
	pub(super) holder_max_htlc_value_in_flight_msat: u64,
	#[cfg(not(test))]
	holder_max_htlc_value_in_flight_msat: u64,

	/// minimum channel reserve for self to maintain - set by them.
	counterparty_selected_channel_reserve_satoshis: Option<u64>,

	#[cfg(test)]
	pub(super) holder_selected_channel_reserve_satoshis: u64,
	#[cfg(not(test))]
	holder_selected_channel_reserve_satoshis: u64,

	counterparty_htlc_minimum_msat: u64,
	holder_htlc_minimum_msat: u64,
	#[cfg(test)]
	pub counterparty_max_accepted_htlcs: u16,
	#[cfg(not(test))]
	counterparty_max_accepted_htlcs: u16,
	holder_max_accepted_htlcs: u16,
	minimum_depth: Option<u32>,

	counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,

	pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
	funding_transaction: Option<Transaction>,
	is_batch_funding: Option<()>,

	counterparty_cur_commitment_point: Option<PublicKey>,
	counterparty_prev_commitment_point: Option<PublicKey>,
	counterparty_node_id: PublicKey,

	counterparty_shutdown_scriptpubkey: Option<ScriptBuf>,

	commitment_secrets: CounterpartyCommitmentSecrets,
	channel_update_status: ChannelUpdateStatus,
	/// Once we reach `closing_negotiation_ready`, we set this, indicating if closing_signed does
	/// not complete within a single timer tick (one minute), we should force-close the channel.
	/// This prevents us from keeping unusable channels around forever if our counterparty wishes
	/// to stall the closing negotiation indefinitely.
	/// Note that this field is reset to false on deserialization to give us a chance to connect to
	/// our peer and start the closing_signed negotiation fresh.
	closing_signed_in_flight: bool,
	/// Our counterparty's channel_announcement signatures provided in announcement_signatures.
	/// This can be used to rebroadcast the channel_announcement message later.
	announcement_sigs: Option<(Signature, Signature)>,

	// We save these values so we can make sure `next_local_commit_tx_fee_msat` and
	// `next_remote_commit_tx_fee_msat` properly predict what the next commitment transaction fee will
	// be, by comparing the cached values to the fee of the transaction generated by
	// `build_commitment_transaction`.
	#[cfg(any(test, fuzzing))]
	next_local_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
	#[cfg(any(test, fuzzing))]
	next_remote_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
	/// lnd has a long-standing bug where, upon reconnection, if the channel is not yet confirmed
	/// they will not send a channel_reestablish until the channel locks in. Then, they will send a
	/// channel_ready *before* sending the channel_reestablish (which is clearly a violation of
	/// the BOLT specs). We copy c-lightning's workaround here and simply store the channel_ready
	/// message until we receive a channel_reestablish.
	///
	/// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
	pub workaround_lnd_bug_4006: Option<msgs::ChannelReady>,

	/// An option set when we wish to track how many ticks have elapsed while waiting for a response
	/// from our counterparty after sending a message. If the peer has yet to respond after reaching
	/// `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`, a reconnection should be attempted to try to
	/// unblock the state machine.
	///
	/// This behavior is mostly motivated by an lnd bug in which we don't receive a message we expect
	/// to in a timely manner, which may lead to channels becoming unusable and/or force-closed. An
	/// example of such can be found at <https://github.com/lightningnetwork/lnd/issues/7682>.
	///
	/// This is currently only used when waiting for a [`msgs::ChannelReestablish`] or
	/// [`msgs::RevokeAndACK`] message from the counterparty.
	sent_message_awaiting_response: Option<usize>,
	#[cfg(any(test, fuzzing))]
	// When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
	// corresponding HTLC on the inbound path. If, then, the outbound path channel is
	// disconnected and reconnected (before we've exchanged commitment_signed and revoke_and_ack
	// messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This
	// is fine, but as a sanity check in our failure to generate the second claim, we check here
	// that the original was a claim, and that we aren't now trying to fulfill a failed HTLC.
	historical_inbound_htlc_fulfills: HashSet<u64>,

	/// This channel's type, as negotiated during channel open
	channel_type: ChannelTypeFeatures,

	// Our counterparty can offer us SCID aliases which they will map to this channel when routing
	// outbound payments. These can be used in invoice route hints to avoid explicitly revealing
	// the channel's funding UTXO.
	//
	// We also use this when sending our peer a channel_update that isn't to be broadcasted
	// publicly - allowing them to re-use their map of SCID -> channel for channel_update ->
	// associated channel mapping.
	//
	// We only bother storing the most recent SCID alias at any time, though our counterparty has
	// to store all of them.
	latest_inbound_scid_alias: Option<u64>,

	// We always offer our counterparty a static SCID alias, which we recognize as for this channel
	// if we see it in HTLC forwarding instructions. We don't bother rotating the alias given we
	// don't currently support node id aliases and eventually privacy should be provided with
	// blinded paths instead of simple scid+node_id aliases.
	outbound_scid_alias: u64,

	// We track whether we already emitted a `ChannelPending` event.
	channel_pending_event_emitted: bool,

	// We track whether we already emitted a `ChannelReady` event.
	channel_ready_event_emitted: bool,

	/// The unique identifier used to re-derive the private key material for the channel through
	/// [`SignerProvider::derive_channel_signer`].
	channel_keys_id: [u8; 32],

	/// If we can't release a [`ChannelMonitorUpdate`] until some external action completes, we
	/// store it here and only release it to the `ChannelManager` once it asks for it.
	blocked_monitor_updates: Vec<PendingChannelMonitorUpdate>,
}
impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
	/// Allowed in any state (including after shutdown)
	pub fn get_update_time_counter(&self) -> u32 {
		self.update_time_counter
	}

	pub fn get_latest_monitor_update_id(&self) -> u64 {
		self.latest_monitor_update_id
	}

	pub fn should_announce(&self) -> bool {
		self.config.announced_channel
	}

	pub fn is_outbound(&self) -> bool {
		self.channel_transaction_parameters.is_outbound_from_holder
	}

	/// Gets the fee we'd want to charge for adding an HTLC output to this Channel
	/// Allowed in any state (including after shutdown)
	pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 {
		self.config.options.forwarding_fee_base_msat
	}

	/// Returns true if we've ever received a message from the remote end for this Channel
	pub fn have_received_message(&self) -> bool {
		self.channel_state > ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT)
	}
	/// Returns true if this channel is fully established and not known to be closing.
	/// Allowed in any state (including after shutdown)
	pub fn is_usable(&self) -> bool {
		matches!(self.channel_state, ChannelState::ChannelReady(_)) &&
			!self.channel_state.is_local_shutdown_sent() &&
			!self.channel_state.is_remote_shutdown_sent() &&
			!self.monitor_pending_channel_ready
	}
	/// shutdown state returns the state of the channel in its various stages of shutdown
	pub fn shutdown_state(&self) -> ChannelShutdownState {
		match self.channel_state {
			ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_) =>
				if self.channel_state.is_local_shutdown_sent() && !self.channel_state.is_remote_shutdown_sent() {
					ChannelShutdownState::ShutdownInitiated
				} else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && !self.closing_negotiation_ready() {
					ChannelShutdownState::ResolvingHTLCs
				} else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && self.closing_negotiation_ready() {
					ChannelShutdownState::NegotiatingClosingFee
				} else {
					ChannelShutdownState::NotShuttingDown
				},
			ChannelState::ShutdownComplete => ChannelShutdownState::ShutdownComplete,
			_ => ChannelShutdownState::NotShuttingDown,
		}
	}
1294 fn closing_negotiation_ready(&self) -> bool {
1295 let is_ready_to_close = match self.channel_state {
1296 ChannelState::AwaitingChannelReady(flags) =>
1297 flags & FundedStateFlags::ALL == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
1298 ChannelState::ChannelReady(flags) =>
1299 flags == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
1302 self.pending_inbound_htlcs.is_empty() &&
1303 self.pending_outbound_htlcs.is_empty() &&
1304 self.pending_update_fee.is_none() &&
1308 /// Returns true if this channel is currently available for use. This check is stricter than
1309 /// is_usable() and additionally considers things like the channel being temporarily disabled.
1310 /// Allowed in any state (including after shutdown)
1311 pub fn is_live(&self) -> bool {
1312 self.is_usable() && !self.channel_state.is_peer_disconnected()
1315 // Public utilities:
1317 pub fn channel_id(&self) -> ChannelId {
1321 /// Returns the `temporary_channel_id` used during channel establishment.
1323 /// Will return `None` for channels created prior to LDK version 0.0.115.
1324 pub fn temporary_channel_id(&self) -> Option<ChannelId> {
1325 self.temporary_channel_id
1328 pub fn minimum_depth(&self) -> Option<u32> {
1332 /// Gets the "user_id" value passed into the construction of this channel. It has no special
1333 /// meaning and exists only to allow users to have a persistent identifier of a channel.
1334 pub fn get_user_id(&self) -> u128 {
1338 /// Gets the channel's type
1339 pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
1343 /// Gets the channel's `short_channel_id`.
1345 /// Will return `None` if the channel hasn't been confirmed yet.
1346 pub fn get_short_channel_id(&self) -> Option<u64> {
1347 self.short_channel_id
1350 /// Allowed in any state (including after shutdown)
1351 pub fn latest_inbound_scid_alias(&self) -> Option<u64> {
1352 self.latest_inbound_scid_alias
1355 /// Allowed in any state (including after shutdown)
1356 pub fn outbound_scid_alias(&self) -> u64 {
1357 self.outbound_scid_alias
1360 /// Returns the holder signer for this channel.
1362 pub fn get_signer(&self) -> &ChannelSignerType<SP> {
1363 &self.holder_signer
1366 /// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0
1367 /// (indicating we were written by LDK prior to 0.0.106, which did not set outbound SCID
1368 /// aliases), or prior to any channel actions during `Channel` initialization.
1369 pub fn set_outbound_scid_alias(&mut self, outbound_scid_alias: u64) {
1370 debug_assert_eq!(self.outbound_scid_alias, 0);
1371 self.outbound_scid_alias = outbound_scid_alias;
1374 /// Returns the funding_txo we either got from our peer, or were given by
1375 /// get_funding_created.
1376 pub fn get_funding_txo(&self) -> Option<OutPoint> {
1377 self.channel_transaction_parameters.funding_outpoint
1380 /// Returns the height in which our funding transaction was confirmed.
1381 pub fn get_funding_tx_confirmation_height(&self) -> Option<u32> {
1382 let conf_height = self.funding_tx_confirmation_height;
1383 if conf_height > 0 {
1390 /// Returns the block hash in which our funding transaction was confirmed.
1391 pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
1392 self.funding_tx_confirmed_in
1395 /// Returns the current number of confirmations on the funding transaction.
1396 pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
1397 if self.funding_tx_confirmation_height == 0 {
1398 // We either haven't seen any confirmation yet, or observed a reorg.
1402 height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1)
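// Illustrative note (added commentary, not from the original source): a transaction
// confirmed in the current block has one confirmation, hence the `+ 1`. If `height` is
// below the recorded confirmation height (e.g. mid-reorg), `checked_sub` returns `None`
// and we conservatively report 0 confirmations.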
1405 fn get_holder_selected_contest_delay(&self) -> u16 {
1406 self.channel_transaction_parameters.holder_selected_contest_delay
1409 fn get_holder_pubkeys(&self) -> &ChannelPublicKeys {
1410 &self.channel_transaction_parameters.holder_pubkeys
1413 pub fn get_counterparty_selected_contest_delay(&self) -> Option<u16> {
1414 self.channel_transaction_parameters.counterparty_parameters
1415 .as_ref().map(|params| params.selected_contest_delay)
1418 fn get_counterparty_pubkeys(&self) -> &ChannelPublicKeys {
1419 &self.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys
1422 /// Allowed in any state (including after shutdown)
1423 pub fn get_counterparty_node_id(&self) -> PublicKey {
1424 self.counterparty_node_id
1427 /// Allowed in any state (including after shutdown)
1428 pub fn get_holder_htlc_minimum_msat(&self) -> u64 {
1429 self.holder_htlc_minimum_msat
1432 /// Allowed in any state (including after shutdown), but will return `None` before TheirInitSent
1433 pub fn get_holder_htlc_maximum_msat(&self) -> Option<u64> {
1434 self.get_htlc_maximum_msat(self.holder_max_htlc_value_in_flight_msat)
1437 /// Allowed in any state (including after shutdown)
1438 pub fn get_announced_htlc_max_msat(&self) -> u64 {
1440 // Upper bound by capacity. We make it a bit less than full capacity to prevent attempts
1441 // to use full capacity. This is an effort to reduce routing failures, because in many cases
1442 // the channel might have been used to route very small values (either by honest users or as DoS).
1443 self.channel_value_satoshis * 1000 * 9 / 10,
1445 self.counterparty_max_htlc_value_in_flight_msat
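// Worked example (illustrative, added commentary): for a 1_000_000 sat channel, the first
// bound above is 1_000_000 * 1000 * 9 / 10 = 900_000_000 msat, so we never announce more
// than 90% of the channel value regardless of the counterparty's in-flight limit.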
1449 /// Allowed in any state (including after shutdown)
1450 pub fn get_counterparty_htlc_minimum_msat(&self) -> u64 {
1451 self.counterparty_htlc_minimum_msat
1454 /// Allowed in any state (including after shutdown), but will return `None` before TheirInitSent
1455 pub fn get_counterparty_htlc_maximum_msat(&self) -> Option<u64> {
1456 self.get_htlc_maximum_msat(self.counterparty_max_htlc_value_in_flight_msat)
1459 fn get_htlc_maximum_msat(&self, party_max_htlc_value_in_flight_msat: u64) -> Option<u64> {
1460 self.counterparty_selected_channel_reserve_satoshis.map(|counterparty_reserve| {
1461 let holder_reserve = self.holder_selected_channel_reserve_satoshis;
1463 (self.channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000,
1464 party_max_htlc_value_in_flight_msat
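// Worked example (illustrative, added commentary): with a 1_000_000 sat channel and
// 10_000 sat reserves on each side, the first bound is
// (1_000_000 - 10_000 - 10_000) * 1000 = 980_000_000 msat, which is then capped by the
// given party's max_htlc_value_in_flight_msat.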
1469 pub fn get_value_satoshis(&self) -> u64 {
1470 self.channel_value_satoshis
1473 pub fn get_fee_proportional_millionths(&self) -> u32 {
1474 self.config.options.forwarding_fee_proportional_millionths
1477 pub fn get_cltv_expiry_delta(&self) -> u16 {
1478 cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
1481 pub fn get_max_dust_htlc_exposure_msat<F: Deref>(&self,
1482 fee_estimator: &LowerBoundedFeeEstimator<F>) -> u64
1483 where F::Target: FeeEstimator
1485 match self.config.options.max_dust_htlc_exposure {
1486 MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => {
1487 let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(
1488 ConfirmationTarget::OnChainSweep) as u64;
1489 feerate_per_kw.saturating_mul(multiplier)
1491 MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
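// E.g. (illustrative, added commentary): with `FeeRateMultiplier(10_000)` and an
// estimator returning 5_000 sat/kW, the allowed dust exposure is
// 5_000 * 10_000 = 50_000_000 msat, i.e. 50_000 sats.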
1495 /// Returns the previous [`ChannelConfig`] applied to this channel, if any.
1496 pub fn prev_config(&self) -> Option<ChannelConfig> {
1497 self.prev_config.map(|prev_config| prev_config.0)
1500 // Checks whether we should emit a `ChannelPending` event.
1501 pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
1502 self.is_funding_broadcast() && !self.channel_pending_event_emitted
1505 // Returns whether we already emitted a `ChannelPending` event.
1506 pub(crate) fn channel_pending_event_emitted(&self) -> bool {
1507 self.channel_pending_event_emitted
1510 // Remembers that we already emitted a `ChannelPending` event.
1511 pub(crate) fn set_channel_pending_event_emitted(&mut self) {
1512 self.channel_pending_event_emitted = true;
1515 // Checks whether we should emit a `ChannelReady` event.
1516 pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool {
1517 self.is_usable() && !self.channel_ready_event_emitted
1520 // Remembers that we already emitted a `ChannelReady` event.
1521 pub(crate) fn set_channel_ready_event_emitted(&mut self) {
1522 self.channel_ready_event_emitted = true;
1525 /// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once
1526 /// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will
1527 /// no longer be considered when forwarding HTLCs.
1528 pub fn maybe_expire_prev_config(&mut self) {
1529 if self.prev_config.is_none() {
1532 let prev_config = self.prev_config.as_mut().unwrap();
1534 if prev_config.1 == EXPIRE_PREV_CONFIG_TICKS {
1535 self.prev_config = None;
1539 /// Returns the current [`ChannelConfig`] applied to the channel.
1540 pub fn config(&self) -> ChannelConfig {
1544 /// Updates the channel's config. Returns a bool indicating whether the applied config
1545 /// update requires broadcasting a new ChannelUpdate message.
1546 pub fn update_config(&mut self, config: &ChannelConfig) -> bool {
1547 let did_channel_update =
1548 self.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths ||
1549 self.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat ||
1550 self.config.options.cltv_expiry_delta != config.cltv_expiry_delta;
1551 if did_channel_update {
1552 self.prev_config = Some((self.config.options, 0));
1553 // Update the counter, which backs the ChannelUpdate timestamp, to allow the relay
1554 // policy change to propagate throughout the network.
1555 self.update_time_counter += 1;
1557 self.config.options = *config;
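// Usage sketch (illustrative, not from the original source; assumes a `chan: Channel<_>`
// in scope): a caller might apply a new fee policy and, on `true`, regenerate its
// channel_update:
//   let mut new_cfg = chan.context.config();
//   new_cfg.forwarding_fee_base_msat = 1_000;
//   if chan.context.update_config(&new_cfg) { /* rebuild and re-sign channel_update */ }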
1561 /// Returns true if funding_signed was sent/received and the
1562 /// funding transaction has been broadcast if necessary.
1563 pub fn is_funding_broadcast(&self) -> bool {
1564 !self.channel_state.is_pre_funded_state() &&
1565 !matches!(self.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH))
1568 /// Transaction nomenclature is somewhat confusing here as there are many different cases - a
1569 /// transaction is referred to as "a's transaction" implying that a will be able to broadcast
1570 /// the transaction. Thus, b will generally be sending a signature over such a transaction to
1571 /// a, and a can revoke the transaction by providing b the relevant per_commitment_secret. As
1572 /// such, a transaction is generally the result of b increasing the amount paid to a (or adding an HTLC to it).
1574 /// @local is used only to convert relevant internal structures which refer to remote vs local
1575 /// to decide the value of outputs and the direction of HTLCs.
1576 /// @generated_by_local is used to determine *which* HTLCs to include - noting that the HTLC
1577 /// state may indicate that one peer has informed the other that they'd like to add an HTLC but
1578 /// have not yet committed it. Such HTLCs will only be included in transactions which are being
1579 /// generated by the peer which proposed adding the HTLCs, and thus we need to understand both
1580 /// which peer generated this transaction and "to whom" this transaction flows.
1582 fn build_commitment_transaction<L: Deref>(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> CommitmentStats
1583 where L::Target: Logger
1585 let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new();
1586 let num_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len();
1587 let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs);
1589 let broadcaster_dust_limit_satoshis = if local { self.holder_dust_limit_satoshis } else { self.counterparty_dust_limit_satoshis };
1590 let mut remote_htlc_total_msat = 0;
1591 let mut local_htlc_total_msat = 0;
1592 let mut value_to_self_msat_offset = 0;
1594 let mut feerate_per_kw = self.feerate_per_kw;
1595 if let Some((feerate, update_state)) = self.pending_update_fee {
1596 if match update_state {
1597 // Note that these match the inclusion criteria when scanning
1598 // pending_inbound_htlcs below.
1599 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); !generated_by_local },
1600 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); !generated_by_local },
1601 FeeUpdateState::Outbound => { assert!(self.is_outbound()); generated_by_local },
1603 feerate_per_kw = feerate;
1607 log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...",
1608 commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number),
1609 get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()),
1611 if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
1613 macro_rules! get_htlc_in_commitment {
1614 ($htlc: expr, $offered: expr) => {
1615 HTLCOutputInCommitment {
1617 amount_msat: $htlc.amount_msat,
1618 cltv_expiry: $htlc.cltv_expiry,
1619 payment_hash: $htlc.payment_hash,
1620 transaction_output_index: None
1625 macro_rules! add_htlc_output {
1626 ($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => {
1627 if $outbound == local { // "offered HTLC output"
1628 let htlc_in_tx = get_htlc_in_commitment!($htlc, true);
1629 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1632 feerate_per_kw as u64 * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
1634 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1635 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1636 included_non_dust_htlcs.push((htlc_in_tx, $source));
1638 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1639 included_dust_htlcs.push((htlc_in_tx, $source));
1642 let htlc_in_tx = get_htlc_in_commitment!($htlc, false);
1643 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1646 feerate_per_kw as u64 * htlc_success_tx_weight(self.get_channel_type()) / 1000
1648 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1649 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1650 included_non_dust_htlcs.push((htlc_in_tx, $source));
1652 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1653 included_dust_htlcs.push((htlc_in_tx, $source));
1659 let mut inbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
1661 for ref htlc in self.pending_inbound_htlcs.iter() {
1662 let (include, state_name) = match htlc.state {
1663 InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"),
1664 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => (!generated_by_local, "AwaitingRemoteRevokeToAnnounce"),
1665 InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => (true, "AwaitingAnnouncedRemoteRevoke"),
1666 InboundHTLCState::Committed => (true, "Committed"),
1667 InboundHTLCState::LocalRemoved(_) => (!generated_by_local, "LocalRemoved"),
1671 add_htlc_output!(htlc, false, None, state_name);
1672 remote_htlc_total_msat += htlc.amount_msat;
1674 log_trace!(logger, " ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1676 &InboundHTLCState::LocalRemoved(ref reason) => {
1677 if generated_by_local {
1678 if let &InboundHTLCRemovalReason::Fulfill(preimage) = reason {
1679 inbound_htlc_preimages.push(preimage);
1680 value_to_self_msat_offset += htlc.amount_msat as i64;
1690 let mut outbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
1692 for ref htlc in self.pending_outbound_htlcs.iter() {
1693 let (include, state_name) = match htlc.state {
1694 OutboundHTLCState::LocalAnnounced(_) => (generated_by_local, "LocalAnnounced"),
1695 OutboundHTLCState::Committed => (true, "Committed"),
1696 OutboundHTLCState::RemoteRemoved(_) => (generated_by_local, "RemoteRemoved"),
1697 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) => (generated_by_local, "AwaitingRemoteRevokeToRemove"),
1698 OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) => (false, "AwaitingRemovedRemoteRevoke"),
1701 let preimage_opt = match htlc.state {
1702 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(p)) => p,
1703 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(p)) => p,
1704 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(p)) => p,
1708 if let Some(preimage) = preimage_opt {
1709 outbound_htlc_preimages.push(preimage);
1713 add_htlc_output!(htlc, true, Some(&htlc.source), state_name);
1714 local_htlc_total_msat += htlc.amount_msat;
1716 log_trace!(logger, " ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1718 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_))|OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => {
1719 value_to_self_msat_offset -= htlc.amount_msat as i64;
1721 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(_)) => {
1722 if !generated_by_local {
1723 value_to_self_msat_offset -= htlc.amount_msat as i64;
1731 let value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
1732 assert!(value_to_self_msat >= 0);
1733 // Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie
1734 // AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
1735 // "violate" their reserve value by couting those against it. Thus, we have to convert
1736 // everything to i64 before subtracting as otherwise we can overflow.
1737 let value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
1738 assert!(value_to_remote_msat >= 0);
1740 #[cfg(debug_assertions)]
1742 // Make sure that the to_self/to_remote is always either past the appropriate
1743 // channel_reserve *or* it is making progress towards it.
1744 let mut broadcaster_max_commitment_tx_output = if generated_by_local {
1745 self.holder_max_commitment_tx_output.lock().unwrap()
1747 self.counterparty_max_commitment_tx_output.lock().unwrap()
1749 debug_assert!(broadcaster_max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= self.counterparty_selected_channel_reserve_satoshis.unwrap() as i64);
1750 broadcaster_max_commitment_tx_output.0 = cmp::max(broadcaster_max_commitment_tx_output.0, value_to_self_msat as u64);
1751 debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= self.holder_selected_channel_reserve_satoshis as i64);
1752 broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64);
1755 let total_fee_sat = commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), &self.channel_transaction_parameters.channel_type_features);
1756 let anchors_val = if self.channel_transaction_parameters.channel_type_features.supports_anchors_zero_fee_htlc_tx() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64;
1757 let (value_to_self, value_to_remote) = if self.is_outbound() {
1758 (value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000)
1760 (value_to_self_msat / 1000, value_to_remote_msat / 1000 - anchors_val - total_fee_sat as i64)
1763 let mut value_to_a = if local { value_to_self } else { value_to_remote };
1764 let mut value_to_b = if local { value_to_remote } else { value_to_self };
1765 let (funding_pubkey_a, funding_pubkey_b) = if local {
1766 (self.get_holder_pubkeys().funding_pubkey, self.get_counterparty_pubkeys().funding_pubkey)
1768 (self.get_counterparty_pubkeys().funding_pubkey, self.get_holder_pubkeys().funding_pubkey)
1771 if value_to_a >= (broadcaster_dust_limit_satoshis as i64) {
1772 log_trace!(logger, " ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a);
1777 if value_to_b >= (broadcaster_dust_limit_satoshis as i64) {
1778 log_trace!(logger, " ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b);
1783 let num_nondust_htlcs = included_non_dust_htlcs.len();
1785 let channel_parameters =
1786 if local { self.channel_transaction_parameters.as_holder_broadcastable() }
1787 else { self.channel_transaction_parameters.as_counterparty_broadcastable() };
1788 let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number,
1795 &mut included_non_dust_htlcs,
1798 let mut htlcs_included = included_non_dust_htlcs;
1799 // The unwrap is safe, because all non-dust HTLCs have been assigned an output index
1800 htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
1801 htlcs_included.append(&mut included_dust_htlcs);
1809 local_balance_msat: value_to_self_msat as u64,
1810 remote_balance_msat: value_to_remote_msat as u64,
1811 inbound_htlc_preimages,
1812 outbound_htlc_preimages,
1817 /// Creates a set of keys for build_commitment_transaction to generate a transaction which our
1818 /// counterparty will sign (ie DO NOT send signatures over a transaction created by this to
1819 /// our counterparty!)
1820 /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
1821 /// TODO: Find a way to enforce this distinction at compile time?
1822 fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
1823 let per_commitment_point = self.holder_signer.as_ref().get_per_commitment_point(commitment_number, &self.secp_ctx);
1824 let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
1825 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1826 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1828 TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint)
1832 /// Creates a set of keys for build_commitment_transaction to generate a transaction which we
1833 /// will sign and send to our counterparty.
1834 /// If an Err is returned, it is a ChannelError::Close (for get_funding_created)
1835 fn build_remote_transaction_keys(&self) -> TxCreationKeys {
1836 //TODO: Ensure that the payment_key derived here ends up in the library users' wallet as we
1837 //may see payments to it!
1838 let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
1839 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1840 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1842 TxCreationKeys::derive_new(&self.secp_ctx, &self.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint)
1845 /// Gets the redeemscript for the funding transaction output (ie the funding transaction output
1846 /// pays to get_funding_redeemscript().to_v0_p2wsh()).
1847 /// Panics if called before accept_channel/InboundV1Channel::new
1848 pub fn get_funding_redeemscript(&self) -> ScriptBuf {
1849 make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
1852 fn counterparty_funding_pubkey(&self) -> &PublicKey {
1853 &self.get_counterparty_pubkeys().funding_pubkey
1856 pub fn get_feerate_sat_per_1000_weight(&self) -> u32 {
1860 pub fn get_dust_buffer_feerate(&self, outbound_feerate_update: Option<u32>) -> u32 {
1861 // When calculating our exposure to dust HTLCs, we assume that the channel feerate
1862 // may, at any point, increase by at least 10 sat/vB (i.e 2530 sat/kWU) or 25%,
1863 // whichever is higher. This ensures that we aren't suddenly exposed to significantly
1864 // more dust balance if the feerate increases when we have several HTLCs pending
1865 // which are near the dust limit.
1866 let mut feerate_per_kw = self.feerate_per_kw;
1867 // If there's a pending update fee, use it to ensure we aren't under-estimating
1868 // potential feerate updates coming soon.
1869 if let Some((feerate, _)) = self.pending_update_fee {
1870 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1872 if let Some(feerate) = outbound_feerate_update {
1873 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1875 let feerate_plus_quarter = feerate_per_kw.checked_mul(1250).map(|v| v / 1000);
1876 cmp::max(2530, feerate_plus_quarter.unwrap_or(u32::max_value()))
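// Worked examples (illustrative, added commentary): at 1_000 sat/kWU this yields
// max(2530, 1_000 * 1250 / 1000) = 2530, while at 10_000 sat/kWU it yields
// max(2530, 12_500) = 12_500, i.e. the +25% branch dominates at higher feerates.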
1879 /// Get forwarding information for the counterparty.
1880 pub fn counterparty_forwarding_info(&self) -> Option<CounterpartyForwardingInfo> {
1881 self.counterparty_forwarding_info.clone()
1884 /// Returns an HTLCStats about pending inbound HTLCs.
1885 fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1887 let mut stats = HTLCStats {
1888 pending_htlcs: context.pending_inbound_htlcs.len() as u32,
1889 pending_htlcs_value_msat: 0,
1890 on_counterparty_tx_dust_exposure_msat: 0,
1891 on_holder_tx_dust_exposure_msat: 0,
1892 holding_cell_msat: 0,
1893 on_holder_tx_holding_cell_htlcs_count: 0,
1896 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1899 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1900 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1901 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1903 let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
1904 let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
1905 for ref htlc in context.pending_inbound_htlcs.iter() {
1906 stats.pending_htlcs_value_msat += htlc.amount_msat;
1907 if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
1908 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1910 if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
1911 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1917 /// Returns an HTLCStats about pending outbound HTLCs, *including* pending adds in our holding cell.
1918 fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1920 let mut stats = HTLCStats {
1921 pending_htlcs: context.pending_outbound_htlcs.len() as u32,
1922 pending_htlcs_value_msat: 0,
1923 on_counterparty_tx_dust_exposure_msat: 0,
1924 on_holder_tx_dust_exposure_msat: 0,
1925 holding_cell_msat: 0,
1926 on_holder_tx_holding_cell_htlcs_count: 0,
1929 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1932 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1933 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1934 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1936 let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
1937 let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
1938 for ref htlc in context.pending_outbound_htlcs.iter() {
1939 stats.pending_htlcs_value_msat += htlc.amount_msat;
1940 if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
1941 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1943 if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
1944 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1948 for update in context.holding_cell_htlc_updates.iter() {
1949 if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
1950 stats.pending_htlcs += 1;
1951 stats.pending_htlcs_value_msat += amount_msat;
1952 stats.holding_cell_msat += amount_msat;
1953 if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
1954 stats.on_counterparty_tx_dust_exposure_msat += amount_msat;
1956 if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
1957 stats.on_holder_tx_dust_exposure_msat += amount_msat;
1959 stats.on_holder_tx_holding_cell_htlcs_count += 1;
1966 /// Get the available balances, see [`AvailableBalances`]'s fields for more info.
1967 /// Doesn't bother handling the
1968 /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
1969 /// corner case properly.
1970 pub fn get_available_balances<F: Deref>(&self, fee_estimator: &LowerBoundedFeeEstimator<F>)
1971 -> AvailableBalances
1972 where F::Target: FeeEstimator
1974 let context = &self;
1975 // Note that we have to handle overflow due to the above case.
1976 let inbound_stats = context.get_inbound_pending_htlc_stats(None);
1977 let outbound_stats = context.get_outbound_pending_htlc_stats(None);
1979 let mut balance_msat = context.value_to_self_msat;
1980 for ref htlc in context.pending_inbound_htlcs.iter() {
1981 if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
1982 balance_msat += htlc.amount_msat;
1985 balance_msat -= outbound_stats.pending_htlcs_value_msat;
1987 let outbound_capacity_msat = context.value_to_self_msat
1988 .saturating_sub(outbound_stats.pending_htlcs_value_msat)
1990 context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);
1992 let mut available_capacity_msat = outbound_capacity_msat;
1994 let anchor_outputs_value_msat = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1995 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
1999 if context.is_outbound() {
2000 // We should mind channel commit tx fee when computing how much of the available capacity
2001 // can be used in the next htlc. Mirrors the logic in send_htlc.
2003 // The fee depends on whether the amount we will be sending is above dust or not,
2004 // and the answer will in turn change the amount itself, making it a circular dependency.
2006 // This complicates the computation around dust-values, up to the one-htlc-value.
2007 let mut real_dust_limit_timeout_sat = context.holder_dust_limit_satoshis;
2008 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2009 real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000;
2012 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
2013 let mut max_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
2014 let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered);
2015 let mut min_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_dust, Some(()));
2016 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2017 max_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
2018 min_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
2021 // We will first subtract the fee as if we were above-dust. Then, if the resulting
2022 // value ends up being below dust, we have this fee available again. In that case,
2023 // match the value to right-below-dust.
2024 let mut capacity_minus_commitment_fee_msat: i64 = available_capacity_msat as i64 -
2025 max_reserved_commit_tx_fee_msat as i64 - anchor_outputs_value_msat as i64;
2026 if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 {
2027 let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
2028 debug_assert!(one_htlc_difference_msat != 0);
2029 capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64;
2030 capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat);
2031 available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64;
2033 available_capacity_msat = capacity_minus_commitment_fee_msat as u64;
2036 // If the channel is inbound (i.e. counterparty pays the fee), we need to make sure
2037 // sending a new HTLC won't reduce their balance below our reserve threshold.
2038 let mut real_dust_limit_success_sat = context.counterparty_dust_limit_satoshis;
2039 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2040 real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000;
2043 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered);
2044 let max_reserved_commit_tx_fee_msat = context.next_remote_commit_tx_fee_msat(htlc_above_dust, None);
2046 let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000;
2047 let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat)
2048 .saturating_sub(inbound_stats.pending_htlcs_value_msat);
2050 if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat + anchor_outputs_value_msat {
2051 // If another HTLC's fee would reduce the remote's balance below the reserve limit
2052 // we've selected for them, we can only send dust HTLCs.
2053 available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1);
2057 let mut next_outbound_htlc_minimum_msat = context.counterparty_htlc_minimum_msat;
2059 // If we get close to our maximum dust exposure, we end up in a situation where we can send
2060 // between zero and the remaining dust exposure limit, OR above the dust limit.
2061 // Because we cannot express this as a simple min/max, we prefer to tell the user they can
2062 // send above the dust limit (as the router can always overpay to meet the dust limit).
2063 let mut remaining_msat_below_dust_exposure_limit = None;
2064 let mut dust_exposure_dust_limit_msat = 0;
2065 let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(fee_estimator);
2067 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2068 (context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
2070 let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64;
2071 (context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2072 context.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2074 let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
2075 if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
2076 remaining_msat_below_dust_exposure_limit =
2077 Some(max_dust_htlc_exposure_msat.saturating_sub(on_counterparty_dust_htlc_exposure_msat));
2078 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
2081 let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
2082 if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
2083 remaining_msat_below_dust_exposure_limit = Some(cmp::min(
2084 remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
2085 max_dust_htlc_exposure_msat.saturating_sub(on_holder_dust_htlc_exposure_msat)));
2086 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
2089 if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit {
2090 if available_capacity_msat < dust_exposure_dust_limit_msat {
2091 available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat);
2093 next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat);
2097 available_capacity_msat = cmp::min(available_capacity_msat,
2098 context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat);
2100 if outbound_stats.pending_htlcs + 1 > context.counterparty_max_accepted_htlcs as u32 {
2101 available_capacity_msat = 0;
2105 inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000
2106 - context.value_to_self_msat as i64
2107 - context.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
2108 - context.holder_selected_channel_reserve_satoshis as i64 * 1000,
2110 outbound_capacity_msat,
2111 next_outbound_htlc_limit_msat: available_capacity_msat,
2112 next_outbound_htlc_minimum_msat,
2117 pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
2118 let context = &self;
2119 (context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis)
2122 /// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the
2123 /// number of pending HTLCs that are on track to be in our next commitment tx.
2125 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
2126 /// `fee_spike_buffer_htlc` is `Some`.
2128 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, while the
2129 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
2131 /// Dust HTLCs are excluded.
2132 fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
2133 let context = &self;
2134 assert!(context.is_outbound());
2136 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2139 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2140 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2142 let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
2143 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
2145 let mut addl_htlcs = 0;
2146 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
2148 HTLCInitiator::LocalOffered => {
2149 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
2153 HTLCInitiator::RemoteOffered => {
2154 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
2160 let mut included_htlcs = 0;
2161 for ref htlc in context.pending_inbound_htlcs.iter() {
2162 if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
2165 // We include LocalRemoved HTLCs here because we may still need to broadcast a commitment
2166 // transaction including this HTLC if it times out before they RAA.
2167 included_htlcs += 1;
2170 for ref htlc in context.pending_outbound_htlcs.iter() {
2171 if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
2175 OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1,
2176 OutboundHTLCState::Committed => included_htlcs += 1,
2177 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
2178 // We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment
2179 // transaction won't be generated until they send us their next RAA, which will mean
2180 // dropping any HTLCs in this state.
2185 for htlc in context.holding_cell_htlc_updates.iter() {
2187 &HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => {
2188 if amount_msat / 1000 < real_dust_limit_timeout_sat {
2193 _ => {}, // Don't include claims/fails that are awaiting ack, because once we get the
2194 // ack we're guaranteed to never include them in commitment txs anymore.
2198 let num_htlcs = included_htlcs + addl_htlcs;
2199 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
2200 #[cfg(any(test, fuzzing))]
2203 if fee_spike_buffer_htlc.is_some() {
2204 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
2206 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len()
2207 + context.holding_cell_htlc_updates.len();
2208 let commitment_tx_info = CommitmentTxInfoCached {
2210 total_pending_htlcs,
2211 next_holder_htlc_id: match htlc.origin {
2212 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
2213 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
2215 next_counterparty_htlc_id: match htlc.origin {
2216 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
2217 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
2219 feerate: context.feerate_per_kw,
2221 *context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
2226 /// Get the commitment tx fee for the remote's next commitment transaction based on the number of
2227 /// pending HTLCs that are on track to be in their next commitment tx
2229 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
2230 /// `fee_spike_buffer_htlc` is `Some`.
2232 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, while the
2233 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
2235 /// Dust HTLCs are excluded.
2236 fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
2237 let context = &self;
2238 assert!(!context.is_outbound());
2240 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2243 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2244 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2246 let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
2247 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
2249 let mut addl_htlcs = 0;
2250 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
2252 HTLCInitiator::LocalOffered => {
2253 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
2257 HTLCInitiator::RemoteOffered => {
2258 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
2264 // When calculating the set of HTLCs which will be included in their next commitment_signed, all
2265 // non-dust inbound HTLCs are included (as all states imply they will be included) and only
2266 // committed outbound HTLCs, see below.
2267 let mut included_htlcs = 0;
2268 for ref htlc in context.pending_inbound_htlcs.iter() {
2269 if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat {
2272 included_htlcs += 1;
2275 for ref htlc in context.pending_outbound_htlcs.iter() {
2276 if htlc.amount_msat / 1000 <= real_dust_limit_success_sat {
2279 // We only include outbound HTLCs which will be present in their next commitment transaction,
2280 // i.e. those whose removal they have not already announced to us.
2282 OutboundHTLCState::Committed => included_htlcs += 1,
2283 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
2284 OutboundHTLCState::LocalAnnounced { .. } => included_htlcs += 1,
2289 let num_htlcs = included_htlcs + addl_htlcs;
2290 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
2291 #[cfg(any(test, fuzzing))]
2294 if fee_spike_buffer_htlc.is_some() {
2295 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
2297 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len();
2298 let commitment_tx_info = CommitmentTxInfoCached {
2300 total_pending_htlcs,
2301 next_holder_htlc_id: match htlc.origin {
2302 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
2303 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
2305 next_counterparty_htlc_id: match htlc.origin {
2306 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
2307 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
2309 feerate: context.feerate_per_kw,
2311 *context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
2316 fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O>
2317 where F: Fn() -> Option<O> {
2318 match self.channel_state {
2319 ChannelState::FundingNegotiated => f(),
2320 ChannelState::AwaitingChannelReady(flags) => if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) {
2329 /// Returns the transaction if there is a pending funding transaction that is yet to be broadcast.
2331 pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
2332 self.if_unbroadcasted_funding(|| self.funding_transaction.clone())
2335 /// Returns the transaction ID if there is a pending funding transaction that is yet to be broadcast.
2337 pub fn unbroadcasted_funding_txid(&self) -> Option<Txid> {
2338 self.if_unbroadcasted_funding(||
2339 self.channel_transaction_parameters.funding_outpoint.map(|txo| txo.txid)
2343 /// Returns whether the channel is funded in a batch.
2344 pub fn is_batch_funding(&self) -> bool {
2345 self.is_batch_funding.is_some()
2348 /// Returns the transaction ID if there is a pending batch funding transaction that is yet to be broadcast.
2350 pub fn unbroadcasted_batch_funding_txid(&self) -> Option<Txid> {
2351 self.unbroadcasted_funding_txid().filter(|_| self.is_batch_funding())
2354 /// Gets the latest commitment transaction and any dependent transactions for relay (forcing
2355 /// shutdown of this channel - no more calls into this Channel may be made afterwards except
2356 /// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
2357 /// Also returns the list of payment_hashes for HTLCs which we can safely fail backwards
2358 /// immediately (others we will have to allow to time out).
2359 pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult {
2360 // Note that we MUST only generate a monitor update that indicates force-closure - we're
2361 // called during initialization prior to the chain_monitor in the encompassing ChannelManager
2362 // being fully configured in some cases. Thus, it's likely any monitor events we generate will
2363 // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
2364 assert!(!matches!(self.channel_state, ChannelState::ShutdownComplete));
2366 // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
2367 // return them to fail the payment.
2368 let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
2369 let counterparty_node_id = self.get_counterparty_node_id();
2370 for htlc_update in self.holding_cell_htlc_updates.drain(..) {
2372 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
2373 dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
2378 let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
2379 // If we haven't yet exchanged funding signatures (ie channel_state < AwaitingChannelReady),
2380 // returning a channel monitor update here would imply a channel monitor update before
2381 // we even registered the channel monitor to begin with, which is invalid.
2382 // Thus, if we aren't actually at a point where we could conceivably broadcast the
2383 // funding transaction, don't return a funding txo (which prevents providing the
2384 // monitor update to the user, even if we return one).
2385 // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
2386 let generate_monitor_update = match self.channel_state {
2387 ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)|ChannelState::ShutdownComplete => true,
2390 if generate_monitor_update {
2391 self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
2392 Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
2393 update_id: self.latest_monitor_update_id,
2394 counterparty_node_id: Some(self.counterparty_node_id),
2395 updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
2399 let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();
2401 self.channel_state = ChannelState::ShutdownComplete;
2402 self.update_time_counter += 1;
2405 dropped_outbound_htlcs,
2406 unbroadcasted_batch_funding_txid,
2407 channel_id: self.channel_id,
2408 counterparty_node_id: self.counterparty_node_id,
2412 /// Only allowed after [`Self::channel_transaction_parameters`] is set.
2413 fn get_funding_signed_msg<L: Deref>(&mut self, logger: &L) -> (CommitmentTransaction, Option<msgs::FundingSigned>) where L::Target: Logger {
2414 let counterparty_keys = self.build_remote_transaction_keys();
2415 let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number + 1, &counterparty_keys, false, false, logger).tx;
2417 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
2418 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
2419 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
2420 &self.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
2422 match &self.holder_signer {
2423 // TODO (arik): move match into calling method for Taproot
2424 ChannelSignerType::Ecdsa(ecdsa) => {
2425 let funding_signed = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.secp_ctx)
2426 .map(|(signature, _)| msgs::FundingSigned {
2427 channel_id: self.channel_id(),
2430 partial_signature_with_nonce: None,
2434 if funding_signed.is_none() {
2435 #[cfg(not(async_signing))] {
2436 panic!("Failed to get signature for funding_signed");
2438 #[cfg(async_signing)] {
2439 log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
2440 self.signer_pending_funding = true;
2442 } else if self.signer_pending_funding {
2443 log_trace!(logger, "Counterparty commitment signature available for funding_signed message; clearing signer_pending_funding");
2444 self.signer_pending_funding = false;
2447 // We sign the "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
2448 (counterparty_initial_commitment_tx, funding_signed)
2450 // TODO (taproot|arik)
2457 // Internal utility functions for channels
2459 /// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
2460 /// `channel_value_satoshis` in msat, set through
2461 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
2463 /// The effective percentage is lower bounded by 1% and upper bounded by 100%.
2465 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
2466 fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
2467 let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
2469 } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
2472 config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
2474 channel_value_satoshis * 10 * configured_percent
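// Worked example (illustrative, added commentary): `* 10 * percent` is
// `* 1000 (sat -> msat) * percent / 100` collapsed into one step, so a 1_000_000 sat
// channel with a configured 10% yields 1_000_000 * 10 * 10 = 100_000_000 msat.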
2477 /// Returns a minimum channel reserve value the remote needs to maintain,
2478 /// required by us according to the configured or default
2479 /// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
2481 /// Guaranteed to return a value no larger than channel_value_satoshis
2483 /// This is used both for outbound and inbound channels, and has a lower bound
2484 /// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
2485 pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
2486 let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
2487 cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
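// Worked example (illustrative, added commentary; assumes a 1% configuration, i.e.
// 10_000 millionths, and a 1_000 sat MIN_THEIR_CHAN_RESERVE_SATOSHIS): a 1_000_000 sat
// channel yields min(1_000_000, max(1_000_000 * 10_000 / 1_000_000, 1_000)) = 10_000 sats.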
2490 /// This is for legacy reasons, present for forward-compatibility.
2491 /// LDK versions older than 0.0.104 don't know how to read/handle values other than the default
2492 /// from storage. Hence, we use this function to avoid persisting default values of
2493 /// `holder_selected_channel_reserve_satoshis` for channels into storage.
2494 pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
2495 let (q, _) = channel_value_satoshis.overflowing_div(100);
2496 cmp::min(channel_value_satoshis, cmp::max(q, 1000))
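// Worked example (illustrative, added commentary): the legacy default is 1% with a
// 1_000 sat floor, so a 50_000 sat channel gets min(50_000, max(500, 1_000)) = 1_000 sats,
// while a 1_000_000 sat channel gets 10_000 sats.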
2499 // Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
2500 // Note that num_htlcs should not include dust HTLCs.
2502 fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2503 feerate_per_kw as u64 * (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
2506 // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs.
2507 // Note that num_htlcs should not include dust HTLCs.
2508 pub(crate) fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2509 // Note that we need to divide before multiplying to round properly,
2510 // since the lowest denomination of bitcoin on-chain is the satoshi.
2511 (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
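// Worked example (illustrative, added commentary; assumes the usual non-anchors base
// weight of 724 WU and 172 WU per non-dust HTLC): at 253 sat/kW with one non-dust HTLC,
// (724 + 172) * 253 / 1000 = 226 sats, then * 1000 = 226_000 msat, e.g.:
//   assert_eq!(commit_tx_fee_msat(253, 1, &ChannelTypeFeatures::only_static_remote_key()), 226_000);
// Multiplying before dividing would instead yield 226_688 msat, which is not a whole
// number of satoshis.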
2514 // Holder designates channel data owned for the benefit of the user client.
2515 // Counterparty designates channel data owned by the other channel participant entity.
2516 pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
2517 pub context: ChannelContext<SP>,
2520 #[cfg(any(test, fuzzing))]
2521 struct CommitmentTxInfoCached {
2523 total_pending_htlcs: usize,
2524 next_holder_htlc_id: u64,
2525 next_counterparty_htlc_id: u64,
2529 /// Contents of a wire message that fails an HTLC backwards. Useful for [`Channel::fail_htlc`] to
2530 /// fail with either [`msgs::UpdateFailMalformedHTLC`] or [`msgs::UpdateFailHTLC`] as needed.
2531 trait FailHTLCContents {
2532 type Message: FailHTLCMessageName;
2533 fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message;
2534 fn to_inbound_htlc_state(self) -> InboundHTLCState;
2535 fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK;
impl FailHTLCContents for msgs::OnionErrorPacket {
	type Message = msgs::UpdateFailHTLC;
	fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
		msgs::UpdateFailHTLC { htlc_id, channel_id, reason: self }
	}
	fn to_inbound_htlc_state(self) -> InboundHTLCState {
		InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(self))
	}
	fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
		HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet: self }
	}
}
// The tuple is (failure_code, sha256_of_onion).
impl FailHTLCContents for (u16, [u8; 32]) {
	type Message = msgs::UpdateFailMalformedHTLC;
	fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
		msgs::UpdateFailMalformedHTLC {
			htlc_id,
			channel_id,
			failure_code: self.0,
			sha256_of_onion: self.1
		}
	}
	fn to_inbound_htlc_state(self) -> InboundHTLCState {
		InboundHTLCState::LocalRemoved(
			InboundHTLCRemovalReason::FailMalformed((self.1, self.0))
		)
	}
	fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
		HTLCUpdateAwaitingACK::FailMalformedHTLC {
			htlc_id,
			failure_code: self.0,
			sha256_of_onion: self.1
		}
	}
}
trait FailHTLCMessageName {
	fn name() -> &'static str;
}
impl FailHTLCMessageName for msgs::UpdateFailHTLC {
	fn name() -> &'static str {
		"update_fail_htlc"
	}
}
impl FailHTLCMessageName for msgs::UpdateFailMalformedHTLC {
	fn name() -> &'static str {
		"update_fail_malformed_htlc"
	}
}
impl<SP: Deref> Channel<SP> where
	SP::Target: SignerProvider,
	<SP::Target as SignerProvider>::EcdsaSigner: WriteableEcdsaChannelSigner
{
	fn check_remote_fee<F: Deref, L: Deref>(
		channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator<F>,
		feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L
	) -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
	{
		let lower_limit_conf_target = if channel_type.supports_anchors_zero_fee_htlc_tx() {
			ConfirmationTarget::MinAllowedAnchorChannelRemoteFee
		} else {
			ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee
		};
		let lower_limit = fee_estimator.bounded_sat_per_1000_weight(lower_limit_conf_target);
		if feerate_per_kw < lower_limit {
			if let Some(cur_feerate) = cur_feerate_per_kw {
				if feerate_per_kw > cur_feerate {
					log_warn!(logger,
						"Accepting feerate that may prevent us from closing this channel because it's higher than what we have now. Had {} s/kW, now {} s/kW.",
						cur_feerate, feerate_per_kw);
					return Ok(());
				}
			}
			return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit)));
		}
		Ok(())
	}
	fn get_closing_scriptpubkey(&self) -> ScriptBuf {
		// The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
		// is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
		// outside of those situations will panic, as the scriptpubkey is unwrapped below.
		self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
	}
	fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
		let mut ret =
		(4 +  // version
		 1 +  // input count
		 36 + // prevout
		 1 +  // script length (0)
		 4 +  // sequence
		 1 +  // output count
		 4    // lock time
		 )*4 +                                                  // * 4 for non-witness parts
		2 +                                                     // witness marker and flag
		1 +                                                     // witness element count
		4 +                                                     // 4 element lengths (2 sigs, multisig dummy, and witness script)
		self.context.get_funding_redeemscript().len() as u64 +  // funding witness script
		2*(1 + 71);                                             // two signatures + sighash type flags
		if let Some(spk) = a_scriptpubkey {
			ret += ((8+1) +            // output values and script length
				spk.len() as u64) * 4; // scriptpubkey and witness multiplier
		}
		if let Some(spk) = b_scriptpubkey {
			ret += ((8+1) +            // output values and script length
				spk.len() as u64) * 4; // scriptpubkey and witness multiplier
		}
		ret
	}
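	// Illustrative arithmetic (not from the original source): with two P2WPKH
	// outputs (22-byte scriptpubkeys) and the 71-byte 2-of-2 funding redeemscript,
	// the non-witness base contributes (4+1+36+1+4+1+4)*4 = 204 weight, each
	// output adds (8+1+22)*4 = 124, and the witness adds 2+1+4+71+2*72 = 222,
	// for a total of 204 + 248 + 222 = 674 weight units.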
	fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
		assert!(self.context.pending_inbound_htlcs.is_empty());
		assert!(self.context.pending_outbound_htlcs.is_empty());
		assert!(self.context.pending_update_fee.is_none());

		let mut total_fee_satoshis = proposed_total_fee_satoshis;
		let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 };
		let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 };

		if value_to_holder < 0 {
			assert!(self.context.is_outbound());
			total_fee_satoshis += (-value_to_holder) as u64;
		} else if value_to_counterparty < 0 {
			assert!(!self.context.is_outbound());
			total_fee_satoshis += (-value_to_counterparty) as u64;
		}

		if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis {
			value_to_counterparty = 0;
		}

		if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis {
			value_to_holder = 0;
		}

		assert!(self.context.shutdown_scriptpubkey.is_some());
		let holder_shutdown_script = self.get_closing_scriptpubkey();
		let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap();
		let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();

		let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
		(closing_transaction, total_fee_satoshis)
	}
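	// Illustrative arithmetic (not from the original source): in a 100_000 sat
	// channel where we are the outbound funder with value_to_self_msat =
	// 60_000_000 and a proposed fee of 1_000 sats, value_to_holder = 60_000 -
	// 1_000 = 59_000 sats and value_to_counterparty = 40_000 sats; the funder
	// alone bears the closing fee, and any output at or below the dust limit is
	// pruned from the closing transaction.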
	fn funding_outpoint(&self) -> OutPoint {
		self.context.channel_transaction_parameters.funding_outpoint.unwrap()
	}
	/// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
	/// entirely.
	///
	/// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
	/// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
	///
	/// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is
	/// disconnected).
	pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
		(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
	where L::Target: Logger {
		// Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
		// (see equivalent if condition there).
		assert!(self.context.channel_state.should_force_holding_cell());
		let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
		let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
		self.context.latest_monitor_update_id = mon_update_id;
		if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
			assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
		}
	}
	fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
		// Either ChannelReady got set (which means it won't be unset) or there is no way any
		// caller thought we could have something claimed (cause we wouldn't have accepted an
		// incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
		// so it's safe to panic here.
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
			panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
		}

		// ChannelManager may generate duplicate claims/fails due to HTLC update events from
		// on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
		// these, but for now we just have to treat them as normal.

		let mut pending_idx = core::usize::MAX;
		let mut htlc_value_msat = 0;
		for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
			if htlc.htlc_id == htlc_id_arg {
				debug_assert_eq!(htlc.payment_hash, PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).to_byte_array()));
				log_debug!(logger, "Claiming inbound HTLC id {} with payment hash {} with preimage {}",
					htlc.htlc_id, htlc.payment_hash, payment_preimage_arg);
				match htlc.state {
					InboundHTLCState::Committed => {},
					InboundHTLCState::LocalRemoved(ref reason) => {
						if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
						} else {
							log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, &self.context.channel_id());
							debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
						}
						return UpdateFulfillFetch::DuplicateClaim {};
					},
					_ => {
						debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
						// Don't return in release mode here so that we can update channel_monitor
					}
				}
				pending_idx = idx;
				htlc_value_msat = htlc.amount_msat;
				break;
			}
		}
		if pending_idx == core::usize::MAX {
			#[cfg(any(test, fuzzing))]
			// If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
			// this is simply a duplicate claim, not previously failed and we lost funds.
			debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
			return UpdateFulfillFetch::DuplicateClaim {};
		}

		// Now update local state:

		// We have to put the payment_preimage in the channel_monitor right away here to ensure we
		// can claim it even if the channel hits the chain before we see their next commitment.
		self.context.latest_monitor_update_id += 1;
		let monitor_update = ChannelMonitorUpdate {
			update_id: self.context.latest_monitor_update_id,
			counterparty_node_id: Some(self.context.counterparty_node_id),
			updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
				payment_preimage: payment_preimage_arg.clone(),
			}],
		};

		if self.context.channel_state.should_force_holding_cell() {
			// Note that this condition is the same as the assertion in
			// `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
			// `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
			// do not get into this branch.
			for pending_update in self.context.holding_cell_htlc_updates.iter() {
				match pending_update {
					&HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
						if htlc_id_arg == htlc_id {
							// Make sure we don't leave latest_monitor_update_id incremented here:
							self.context.latest_monitor_update_id -= 1;
							#[cfg(any(test, fuzzing))]
							debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
							return UpdateFulfillFetch::DuplicateClaim {};
						}
					},
					&HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
						&HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
					{
						if htlc_id_arg == htlc_id {
							log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id());
							// TODO: We may actually be able to switch to a fulfill here, though its
							// rare enough it may not be worth the complexity burden.
							debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
							return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
						}
					},
					_ => {}
				}
			}
			log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state.to_u32());
			self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
				payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
			});
			#[cfg(any(test, fuzzing))]
			self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
			return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
		}
		#[cfg(any(test, fuzzing))]
		self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);

		{
			let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
			if let InboundHTLCState::Committed = htlc.state {
			} else {
				debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
				return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
			}
			log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, &self.context.channel_id);
			htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
		}

		UpdateFulfillFetch::NewClaim {
			monitor_update,
			htlc_value_msat,
			msg: Some(msgs::UpdateFulfillHTLC {
				channel_id: self.context.channel_id(),
				htlc_id: htlc_id_arg,
				payment_preimage: payment_preimage_arg,
			}),
		}
	}
	pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
		let release_cs_monitor = self.context.blocked_monitor_updates.is_empty();
		match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
			UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
				// Even if we aren't supposed to let new monitor updates with commitment state
				// updates run, we still need to push the preimage ChannelMonitorUpdateStep no
				// matter what. Sadly, to push a new monitor update which flies before others
				// already queued, we have to insert it into the pending queue and update the
				// update_ids of all the following monitors.
				if release_cs_monitor && msg.is_some() {
					let mut additional_update = self.build_commitment_no_status_check(logger);
					// build_commitment_no_status_check may bump latest_monitor_id but we want them
					// to be strictly increasing by one, so decrement it here.
					self.context.latest_monitor_update_id = monitor_update.update_id;
					monitor_update.updates.append(&mut additional_update.updates);
				} else {
					let new_mon_id = self.context.blocked_monitor_updates.get(0)
						.map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
					monitor_update.update_id = new_mon_id;
					for held_update in self.context.blocked_monitor_updates.iter_mut() {
						held_update.update.update_id += 1;
					}
					if msg.is_some() {
						debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
						let update = self.build_commitment_no_status_check(logger);
						self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
							update,
						});
					}
				}

				self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
				UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, }
			},
			UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
		}
	}
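	// Illustrative update_id sequencing (not from the original source): if blocked
	// updates with ids [5, 6] are queued, a new preimage update takes id 5 (the
	// first blocked id), and the held updates are renumbered to [6, 7], preserving
	// the strictly-increasing-by-one invariant on ChannelMonitor update ids.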
	/// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
	/// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
	/// however, fail more than once as we wait for an upstream failure to be irrevocably committed
	/// before we fail backwards.
	///
	/// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
	/// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
	/// [`ChannelError::Ignore`].
	pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
	-> Result<(), ChannelError> where L::Target: Logger {
		self.fail_htlc(htlc_id_arg, err_packet, true, logger)
			.map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
	}
	/// Used for failing back with [`msgs::UpdateFailMalformedHTLC`]. For now, this is used when we
	/// want to fail blinded HTLCs where we are not the intro node.
	///
	/// See [`Self::queue_fail_htlc`] for more info.
	pub fn queue_fail_malformed_htlc<L: Deref>(
		&mut self, htlc_id_arg: u64, failure_code: u16, sha256_of_onion: [u8; 32], logger: &L
	) -> Result<(), ChannelError> where L::Target: Logger {
		self.fail_htlc(htlc_id_arg, (failure_code, sha256_of_onion), true, logger)
			.map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
	}
	/// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
	/// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
	/// however, fail more than once as we wait for an upstream failure to be irrevocably committed
	/// before we fail backwards.
	///
	/// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
	/// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
	/// [`ChannelError::Ignore`].
	fn fail_htlc<L: Deref, E: FailHTLCContents + Clone>(
		&mut self, htlc_id_arg: u64, err_packet: E, mut force_holding_cell: bool,
		logger: &L
	) -> Result<Option<E::Message>, ChannelError> where L::Target: Logger {
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
			panic!("Was asked to fail an HTLC when channel was not in an operational state");
		}

		// ChannelManager may generate duplicate claims/fails due to HTLC update events from
		// on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
		// these, but for now we just have to treat them as normal.

		let mut pending_idx = core::usize::MAX;
		for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
			if htlc.htlc_id == htlc_id_arg {
				match htlc.state {
					InboundHTLCState::Committed => {},
					InboundHTLCState::LocalRemoved(ref reason) => {
						if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
						} else {
							debug_assert!(false, "Tried to fail an HTLC that was already failed");
						}
						return Ok(None);
					},
					_ => {
						debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
						return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
					}
				}
				pending_idx = idx;
			}
		}
		if pending_idx == core::usize::MAX {
			#[cfg(any(test, fuzzing))]
			// If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
			// is simply a duplicate fail, not previously failed and we failed-back too early.
			debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
			return Ok(None);
		}

		if self.context.channel_state.should_force_holding_cell() {
			debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
			force_holding_cell = true;
		}

		// Now update local state:
		if force_holding_cell {
			for pending_update in self.context.holding_cell_htlc_updates.iter() {
				match pending_update {
					&HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
						if htlc_id_arg == htlc_id {
							#[cfg(any(test, fuzzing))]
							debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
							return Ok(None);
						}
					},
					&HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
						&HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
					{
						if htlc_id_arg == htlc_id {
							debug_assert!(false, "Tried to fail an HTLC that was already failed");
							return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
						}
					},
					_ => {}
				}
			}
			log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id());
			self.context.holding_cell_htlc_updates.push(err_packet.to_htlc_update_awaiting_ack(htlc_id_arg));
			return Ok(None);
		}

		log_trace!(logger, "Failing HTLC ID {} back with {} message in channel {}.", htlc_id_arg,
			E::Message::name(), &self.context.channel_id());
		{
			let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
			htlc.state = err_packet.clone().to_inbound_htlc_state();
		}

		Ok(Some(err_packet.to_message(htlc_id_arg, self.context.channel_id())))
	}
	// Message handlers:

	/// Updates the state of the channel to indicate that all channels in the batch have received
	/// funding_signed and persisted their monitors.
	/// The funding transaction is consequently allowed to be broadcast, and the channel can be
	/// treated as a non-batch channel going forward.
	pub fn set_batch_ready(&mut self) {
		self.context.is_batch_funding = None;
		self.context.channel_state.clear_waiting_for_batch();
	}
	/// Unsets the existing funding information.
	///
	/// This must only be used if the channel has not yet completed funding and has not been used.
	///
	/// Further, the channel must be immediately shut down after this with a call to
	/// [`ChannelContext::force_shutdown`].
	pub fn unset_funding_info(&mut self, temporary_channel_id: ChannelId) {
		debug_assert!(matches!(
			self.context.channel_state, ChannelState::AwaitingChannelReady(_)
		));
		self.context.channel_transaction_parameters.funding_outpoint = None;
		self.context.channel_id = temporary_channel_id;
	}
	/// Handles a channel_ready message from our peer. If we've already sent our channel_ready
	/// and the channel is now usable (and public), this may generate an announcement_signatures to
	/// reply with.
	pub fn channel_ready<NS: Deref, L: Deref>(
		&mut self, msg: &msgs::ChannelReady, node_signer: &NS, chain_hash: ChainHash,
		user_config: &UserConfig, best_block: &BestBlock, logger: &L
	) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
	where
		NS::Target: NodeSigner,
		L::Target: Logger
	{
		if self.context.channel_state.is_peer_disconnected() {
			self.context.workaround_lnd_bug_4006 = Some(msg.clone());
			return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
		}

		if let Some(scid_alias) = msg.short_channel_id_alias {
			if Some(scid_alias) != self.context.short_channel_id {
				// The scid alias provided can be used to route payments *from* our counterparty,
				// i.e. can be used for inbound payments and provided in invoices, but is not used
				// when routing outbound payments.
				self.context.latest_inbound_scid_alias = Some(scid_alias);
			}
		}

		// Our channel_ready shouldn't have been sent if we are waiting for other channels in the
		// batch, but we can receive channel_ready messages.
		let mut check_reconnection = false;
		match &self.context.channel_state {
			ChannelState::AwaitingChannelReady(flags) => {
				let flags = *flags & !FundedStateFlags::ALL;
				debug_assert!(!flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY) || !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
				if flags & !AwaitingChannelReadyFlags::WAITING_FOR_BATCH == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY {
					// If we reconnected before sending our `channel_ready` they may still resend theirs.
					check_reconnection = true;
				} else if (flags & !AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty() {
					self.context.channel_state.set_their_channel_ready();
				} else if flags == AwaitingChannelReadyFlags::OUR_CHANNEL_READY {
					self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
					self.context.update_time_counter += 1;
				} else {
					// We're in `WAITING_FOR_BATCH`, so we should wait until we're ready.
					debug_assert!(flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
				}
			},
			// If we reconnected before sending our `channel_ready` they may still resend theirs.
			ChannelState::ChannelReady(_) => check_reconnection = true,
			_ => return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned())),
		}
		if check_reconnection {
			// They probably disconnected/reconnected and re-sent the channel_ready, which is
			// required, or they're sending a fresh SCID alias.
			let expected_point =
				if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
					// If they haven't ever sent an updated point, the point they send should match
					// the current one.
					self.context.counterparty_cur_commitment_point
				} else if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
					// If we've advanced the commitment number once, the second commitment point is
					// at `counterparty_prev_commitment_point`, which is not yet revoked.
					debug_assert!(self.context.counterparty_prev_commitment_point.is_some());
					self.context.counterparty_prev_commitment_point
				} else {
					// If they have sent updated points, channel_ready is always supposed to match
					// their "first" point, which we re-derive here.
					Some(PublicKey::from_secret_key(&self.context.secp_ctx, &SecretKey::from_slice(
						&self.context.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
					).expect("We already advanced, so previous secret keys should have been validated already")))
				};
			if expected_point != Some(msg.next_per_commitment_point) {
				return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
			}
			return Ok(None);
		}

		self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
		self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);

		log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id());

		Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger))
	}
	pub fn update_add_htlc<F, FE: Deref, L: Deref>(
		&mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
		create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
	) -> Result<(), ChannelError>
	where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
		FE::Target: FeeEstimator, L::Target: Logger,
	{
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
			return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
		}
		// We can't accept HTLCs sent after we've sent a shutdown.
		if self.context.channel_state.is_local_shutdown_sent() {
			pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
		}
		// If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
		if self.context.channel_state.is_remote_shutdown_sent() {
			return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
		}
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
		}
		if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
			return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
		}
		if msg.amount_msat == 0 {
			return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
		}
		if msg.amount_msat < self.context.holder_htlc_minimum_msat {
			return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
		}

		let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
		let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
		if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
			return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
		}
		if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
			return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
		}

		// Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
		// the reserve_satoshis we told them to always have as direct payment so that they lose
		// something if we punish them for broadcasting an old state).
		// Note that we don't really care about having a small/no to_remote output in our local
		// commitment transactions, as the purpose of the channel reserve is to ensure we can
		// punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
		// present in the next commitment transaction we send them (at least for fulfilled ones,
		// failed ones won't modify value_to_self).
		// Note that we will send HTLCs which another instance of rust-lightning would think
		// violate the reserve value if we do not do this (as we forget inbound HTLCs from the
		// Channel state once they will not be present in the next received commitment
		// transaction).
		let mut removed_outbound_total_msat = 0;
		for ref htlc in self.context.pending_outbound_htlcs.iter() {
			if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
				removed_outbound_total_msat += htlc.amount_msat;
			} else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
				removed_outbound_total_msat += htlc.amount_msat;
			}
		}
		let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
		let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
			(0, 0)
		} else {
			let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
			(dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
				dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
		};
		let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
		if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
			let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
			if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
				log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
					on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
				pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
			}
		}

		let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
		if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
			let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
			if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
				log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
					on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
				pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
			}
		}
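		// Illustrative arithmetic (not from the original source): on a non-anchors
		// channel with a dust buffer feerate of 2_500 sat/kW and an HTLC-timeout tx
		// weight of 663, htlc_timeout_dust_limit = 2_500 * 663 / 1000 = 1_657 sats;
		// with a counterparty dust limit of 546 sats, any HTLC below 2_203 sats
		// counts toward our dust exposure on the counterparty's commitment tx.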
		let pending_value_to_self_msat =
			self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
		let pending_remote_value_msat =
			self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
		if pending_remote_value_msat < msg.amount_msat {
			return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
		}

		// Check that the remote can afford to pay for this HTLC on-chain at the current
		// feerate_per_kw, while maintaining their channel reserve (as required by the spec).
		let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else {
			let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
			self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
		};
		let anchor_outputs_value_msat = if !self.context.is_outbound() && self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
			ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
		} else {
			0
		};
		if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(anchor_outputs_value_msat) < remote_commit_tx_fee_msat {
			return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
		}
		if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(remote_commit_tx_fee_msat).saturating_sub(anchor_outputs_value_msat) < self.context.holder_selected_channel_reserve_satoshis * 1000 {
			return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
		}
		let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
			ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
		} else {
			0
		};
		if !self.context.is_outbound() {
			// `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
			// the spec because the fee spike buffer requirement doesn't exist on the receiver's
			// side, only on the sender's. Note that with anchor outputs we are no longer as
			// sensitive to fee spikes, so we don't need to account for them.
			let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
			let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
			if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
				remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
			}
			if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
				// Note that if the pending_forward_status is not updated here, then it's because we're already failing
				// the HTLC, i.e. its status is already set to failing.
				log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
				pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
			}
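			// Illustrative arithmetic (not from the original source): taking
			// FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE = 2 for illustration, if the
			// commitment fee including the buffer HTLC is 5_000 msat, a non-anchors
			// channel requires the remote to retain 10_000 msat above their reserve
			// before this HTLC is accepted for forwarding.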
		} else {
			// Check that they won't violate our local required channel reserve by adding this HTLC.
			let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
			let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
			if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat + anchor_outputs_value_msat {
				return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
			}
		}

		if self.context.next_counterparty_htlc_id != msg.htlc_id {
			return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
		}
		if msg.cltv_expiry >= 500000000 {
			return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
		}
		if self.context.channel_state.is_local_shutdown_sent() {
			if let PendingHTLCStatus::Forward(_) = pending_forward_status {
				panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
			}
		}

		// Now update local state:
		self.context.next_counterparty_htlc_id += 1;
		self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
			htlc_id: msg.htlc_id,
			amount_msat: msg.amount_msat,
			payment_hash: msg.payment_hash,
			cltv_expiry: msg.cltv_expiry,
			state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
		});
		Ok(())
	}
	/// Marks an outbound HTLC which we have received update_fail/fulfill/malformed
	#[inline]
	fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
		assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage");
		for htlc in self.context.pending_outbound_htlcs.iter_mut() {
			if htlc.htlc_id == htlc_id {
				let outcome = match check_preimage {
					None => fail_reason.into(),
					Some(payment_preimage) => {
						let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array());
						if payment_hash != htlc.payment_hash {
							return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
						}
						OutboundHTLCOutcome::Success(Some(payment_preimage))
					}
				};
				match htlc.state {
					OutboundHTLCState::LocalAnnounced(_) =>
						return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
					OutboundHTLCState::Committed => {
						htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
					},
					OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
						return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
				}
				return Ok(htlc);
			}
		}
		Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
	}
	pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
			return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
		}
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
		}

		self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
	}
	pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
			return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
		}
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
		}

		self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
		Ok(())
	}
	pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
			return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
		}
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
		}

		self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
		Ok(())
	}
	pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
	where L::Target: Logger
	{
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
			return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
		}
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
		}
		if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
			return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
		}

		let funding_script = self.context.get_funding_redeemscript();

		let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);

		let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
		let commitment_txid = {
			let trusted_tx = commitment_stats.tx.trust();
			let bitcoin_tx = trusted_tx.built_transaction();
			let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);

			log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
				log_bytes!(msg.signature.serialize_compact()[..]),
				log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
				log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), &self.context.channel_id());
			if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
				return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
			}
			bitcoin_tx.txid
		};
		let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();

		// If our counterparty updated the channel fee in this commitment transaction, check that
		// they can actually afford the new fee now.
		let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee {
			update_state == FeeUpdateState::RemoteAnnounced
		} else { false };
		if update_fee {
			debug_assert!(!self.context.is_outbound());
			let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
			if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
				return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
			}
		}
		#[cfg(any(test, fuzzing))]
		{
			if self.context.is_outbound() {
				let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
				*self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
				if let Some(info) = projected_commit_tx_info {
					let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len()
						+ self.context.holding_cell_htlc_updates.len();
					if info.total_pending_htlcs == total_pending_htlcs
						&& info.next_holder_htlc_id == self.context.next_holder_htlc_id
						&& info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
						&& info.feerate == self.context.feerate_per_kw {
							assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);
					}
				}
			}
		}

		if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
			return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
		}

		// Up to LDK 0.0.115, HTLC information was required to be duplicated in the
		// `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
		// in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
		// outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for
		// backwards compatibility, we never use it in production. To provide test coverage, here,
		// we randomly decide (in test/fuzzing builds) to use the new vec sometimes.
		#[allow(unused_assignments, unused_mut)]
		let mut separate_nondust_htlc_sources = false;
		#[cfg(all(feature = "std", any(test, fuzzing)))] {
			use core::hash::{BuildHasher, Hasher};
			// Get a random value using the only std API to do so - the DefaultHasher
			let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
			separate_nondust_htlc_sources = rand_val % 2 == 0;
		}

		let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
		let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
		for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
			if let Some(_) = htlc.transaction_output_index {
				let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
					self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, &self.context.channel_type,
					&keys.broadcaster_delayed_payment_key, &keys.revocation_key);

				let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &keys);
				let htlc_sighashtype = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
				let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
				log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
					log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.to_public_key().serialize()),
					encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), &self.context.channel_id());
				if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key.to_public_key()) {
					return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
				}
				if !separate_nondust_htlc_sources {
					htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
				}
			} else {
				htlcs_and_sigs.push((htlc, None, source_opt.take()));
			}
			if separate_nondust_htlc_sources {
				if let Some(source) = source_opt.take() {
					nondust_htlc_sources.push(source);
				}
			}
			debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
		}

		let holder_commitment_tx = HolderCommitmentTransaction::new(
			commitment_stats.tx,
			msg.signature,
			msg.htlc_signatures.clone(),
			&self.context.get_holder_pubkeys().funding_pubkey,
			self.context.counterparty_funding_pubkey()
		);

		self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.outbound_htlc_preimages)
			.map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;

		// Update state now that we've passed all the can-fail calls...
		let mut need_commitment = false;
		if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee {
			if *update_state == FeeUpdateState::RemoteAnnounced {
				*update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
				need_commitment = true;
			}
		}

		for htlc in self.context.pending_inbound_htlcs.iter_mut() {
			let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
				Some(forward_info.clone())
			} else { None };
			if let Some(forward_info) = new_forward {
				log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
					&htlc.payment_hash, &self.context.channel_id);
				htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
				need_commitment = true;
			}
		}
		let mut claimed_htlcs = Vec::new();
		for htlc in self.context.pending_outbound_htlcs.iter_mut() {
			if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
				log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
					&htlc.payment_hash, &self.context.channel_id);
				// Grab the preimage, if it exists, instead of cloning
				let mut reason = OutboundHTLCOutcome::Success(None);
				mem::swap(outcome, &mut reason);
				if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
					// If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
					// upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
					// have a `Success(None)` reason. In this case we could forget some HTLC
					// claims, but such an upgrade is unlikely and including claimed HTLCs here
					// fixes a bug which the user was exposed to on 0.0.104 when they started the
					// claim.
					claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
				}
				htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
				need_commitment = true;
			}
		}

		self.context.latest_monitor_update_id += 1;
		let mut monitor_update = ChannelMonitorUpdate {
			update_id: self.context.latest_monitor_update_id,
			counterparty_node_id: Some(self.context.counterparty_node_id),
			updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
				commitment_tx: holder_commitment_tx,
				htlc_outputs: htlcs_and_sigs,
				claimed_htlcs,
				nondust_htlc_sources,
			}],
		};

		self.context.cur_holder_commitment_transaction_number -= 1;
		self.context.expecting_peer_commitment_signed = false;
		// Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
		// build_commitment_no_status_check() next which will reset this to RAAFirst.
		self.context.resend_order = RAACommitmentOrder::CommitmentFirst;

		if self.context.channel_state.is_monitor_update_in_progress() {
			// In case we initially failed monitor updating without requiring a response, we need
			// to make sure the RAA gets sent first.
			self.context.monitor_pending_revoke_and_ack = true;
			if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
				// If we were going to send a commitment_signed after the RAA, go ahead and do all
				// the corresponding HTLC status updates so that
				// get_last_commitment_update_for_send includes the right HTLCs.
				self.context.monitor_pending_commitment_signed = true;
				let mut additional_update = self.build_commitment_no_status_check(logger);
				// build_commitment_no_status_check may bump latest_monitor_id but we want them to be
				// strictly increasing by one, so decrement it here.
				self.context.latest_monitor_update_id = monitor_update.update_id;
				monitor_update.updates.append(&mut additional_update.updates);
			}
			log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
				&self.context.channel_id);
			return Ok(self.push_ret_blockable_mon_update(monitor_update));
		}

		let need_commitment_signed = if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
			// If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
			// we'll send one right away when we get the revoke_and_ack when we
			// free_holding_cell_htlcs().
			let mut additional_update = self.build_commitment_no_status_check(logger);
			// build_commitment_no_status_check may bump latest_monitor_id but we want them to be
			// strictly increasing by one, so decrement it here.
			self.context.latest_monitor_update_id = monitor_update.update_id;
			monitor_update.updates.append(&mut additional_update.updates);
			true
		} else { false };

		log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
			&self.context.channel_id(), if need_commitment_signed { " our own commitment_signed and" } else { "" });
		self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
		return Ok(self.push_ret_blockable_mon_update(monitor_update));
	}
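	// Illustrative flow (not from the original source): on receiving a valid
	// commitment_signed we always respond with a revoke_and_ack, and if HTLC state
	// changed (need_commitment_signed above) we additionally send our own
	// commitment_signed, with the RAA sent first per the resend_order handling
	// above.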
	/// Public version of the below, checking relevant preconditions first.
	/// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
	/// returns `(None, Vec::new())`.
	pub fn maybe_free_holding_cell_htlcs<F: Deref, L: Deref>(
		&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
	) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
	where F::Target: FeeEstimator, L::Target: Logger
	{
		if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && !self.context.channel_state.should_force_holding_cell() {
			self.free_holding_cell_htlcs(fee_estimator, logger)
		} else { (None, Vec::new()) }
	}
3569 /// Frees any pending commitment updates in the holding cell, generating the relevant messages
3570 /// for our counterparty.
3571 fn free_holding_cell_htlcs<F: Deref, L: Deref>(
3572 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3573 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3574 where F::Target: FeeEstimator, L::Target: Logger
3576 assert!(!self.context.channel_state.is_monitor_update_in_progress());
3577 if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
3578 log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
3579 if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id());
3581 let mut monitor_update = ChannelMonitorUpdate {
3582 update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
			counterparty_node_id: Some(self.context.counterparty_node_id),
			updates: Vec::new(),
		};

		let mut htlc_updates = Vec::new();
		mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
		let mut update_add_count = 0;
		let mut update_fulfill_count = 0;
		let mut update_fail_count = 0;
		let mut htlcs_to_fail = Vec::new();
		for htlc_update in htlc_updates.drain(..) {
			// Note that this *can* fail, though it should be due to rather-rare conditions on
			// fee races with adding too many outputs which push our total payments just over
			// the limit. In case it's less rare than I anticipate, we may want to revisit
			// handling this case better and maybe fulfilling some of the HTLCs while attempting
			// to rebalance channels.
			match &htlc_update {
				&HTLCUpdateAwaitingACK::AddHTLC {
					amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
					skimmed_fee_msat, blinding_point, ..
				} => {
					match self.send_htlc(
						amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(),
						false, skimmed_fee_msat, blinding_point, fee_estimator, logger
					) {
						Ok(_) => update_add_count += 1,
						Err(e) => {
							match e {
								ChannelError::Ignore(ref msg) => {
									log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", &payment_hash, msg, &self.context.channel_id());
									// If we fail to send here, then this HTLC should
									// be failed backwards. Failing to send here
									// indicates that this HTLC may keep being put back
									// into the holding cell without ever being
									// successfully forwarded/failed/fulfilled, causing
									// our counterparty to eventually close on us.
									htlcs_to_fail.push((source.clone(), *payment_hash));
								},
								_ => {
									panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
								},
							}
						}
					}
				},
				&HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
					// If an HTLC claim was previously added to the holding cell (via
					// `get_update_fulfill_htlc`), then generating the claim message itself must
					// not fail - any in-between attempts to claim the HTLC will have resulted
					// in it hitting the holding cell again and we cannot change the state of a
					// holding cell HTLC from fulfill to anything else.
					let mut additional_monitor_update =
						if let UpdateFulfillFetch::NewClaim { monitor_update, .. } =
							self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger)
						{ monitor_update } else { unreachable!() };
					update_fulfill_count += 1;
					monitor_update.updates.append(&mut additional_monitor_update.updates);
				},
				&HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
					match self.fail_htlc(htlc_id, err_packet.clone(), false, logger) {
						Ok(update_fail_msg_option) => {
							// If an HTLC failure was previously added to the holding cell (via
							// `queue_fail_htlc`) then generating the fail message itself must
							// not fail - we should never end up in a state where we double-fail
							// an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
							// for a full revocation before failing.
							debug_assert!(update_fail_msg_option.is_some());
							update_fail_count += 1;
						},
						Err(e) => {
							if let ChannelError::Ignore(_) = e {}
							else {
								panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
							}
						}
					}
				},
				&HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
					match self.fail_htlc(htlc_id, (failure_code, sha256_of_onion), false, logger) {
						Ok(update_fail_malformed_opt) => {
							debug_assert!(update_fail_malformed_opt.is_some()); // See above comment
							update_fail_count += 1;
						},
						Err(e) => {
							if let ChannelError::Ignore(_) = e {}
							else {
								panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
							}
						}
					}
				},
			}
		}
		if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && self.context.holding_cell_update_fee.is_none() {
			return (None, htlcs_to_fail);
		}
		let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
			self.send_update_fee(feerate, false, fee_estimator, logger)
		} else {
			None
		};

		let mut additional_update = self.build_commitment_no_status_check(logger);
		// build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_id
		// but we want them to be strictly increasing by one, so reset it here.
		self.context.latest_monitor_update_id = monitor_update.update_id;
		monitor_update.updates.append(&mut additional_update.updates);

		log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
			&self.context.channel_id(), if update_fee.is_some() { "a fee update, " } else { "" },
			update_add_count, update_fulfill_count, update_fail_count);

		self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
		(self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
	} else {
		(None, Vec::new())
	}
}
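	// The "holding cell" freed above batches updates which could not be sent while we were
	// waiting on the counterparty's revoke_and_ack or on a pending monitor write. Freeing
	// it coalesces all queued adds/fulfills/fails (plus any queued update_fee) into a
	// single commitment_signed and a single ChannelMonitorUpdate, rather than one per
	// queued update.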
	/// Handles receiving a remote's revoke_and_ack. Note that we may return a new
	/// commitment_signed message here in case we had pending outbound HTLCs to add which were
	/// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
	/// generating an appropriate error *after* the channel state has been updated based on the
	/// revoke_and_ack message.
	pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK,
		fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L, hold_mon_update: bool,
	) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
	where F::Target: FeeEstimator, L::Target: Logger,
	{
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
			return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
		}
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
		}
		if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
			return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
		}

		let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());

		if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
			if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
				return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
			}
		}

		if !self.context.channel_state.is_awaiting_remote_revoke() {
			// Our counterparty seems to have burned their coins to us (by revoking a state when we
			// haven't given them a new commitment transaction to broadcast). We should probably
			// take advantage of this by updating our channel monitor, sending them an error, and
			// waiting for them to broadcast their latest (now-revoked) claim. But, that would be a
			// lot of work, and there's some chance this is all a misunderstanding anyway.
			// We have to do *something*, though, since our signer may get mad at us for otherwise
			// jumping a remote commitment number, so best to just force-close and move on.
			return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
		}

		#[cfg(any(test, fuzzing))]
		{
			*self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
			*self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
		}

		match &self.context.holder_signer {
			ChannelSignerType::Ecdsa(ecdsa) => {
				ecdsa.validate_counterparty_revocation(
					self.context.cur_counterparty_commitment_transaction_number + 1,
					&secret
				).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
			},
			// TODO (taproot|arik)
			#[cfg(taproot)]
			_ => todo!()
		};

		self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
			.map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
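		// Commitment numbers in this file count *down* from INITIAL_COMMITMENT_NUMBER
		// (2^48 - 1), so `cur_counterparty_commitment_transaction_number + 1` above refers
		// to the commitment the counterparty just revoked. For example, after three
		// commitment updates the current number is 2^48 - 4, and this revoke_and_ack
		// reveals the secret for commitment 2^48 - 3.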
		self.context.latest_monitor_update_id += 1;
		let mut monitor_update = ChannelMonitorUpdate {
			update_id: self.context.latest_monitor_update_id,
			counterparty_node_id: Some(self.context.counterparty_node_id),
			updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
				idx: self.context.cur_counterparty_commitment_transaction_number + 1,
				secret: msg.per_commitment_secret,
			}],
		};

		// Update state now that we've passed all the can-fail calls. (Note that we may still
		// fail to generate the new commitment_signed message, but that's OK: we step the channel
		// here and *then*, if the new generation fails, we can fail the channel based on that,
		// but stepping stuff here should be safe either way.)
		self.context.channel_state.clear_awaiting_remote_revoke();
		self.context.sent_message_awaiting_response = None;
		self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
		self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
		self.context.cur_counterparty_commitment_transaction_number -= 1;

		if self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
			self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
		}

		log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id());
		let mut to_forward_infos = Vec::new();
		let mut revoked_htlcs = Vec::new();
		let mut finalized_claimed_htlcs = Vec::new();
		let mut update_fail_htlcs = Vec::new();
		let mut update_fail_malformed_htlcs = Vec::new();
		let mut require_commitment = false;
		let mut value_to_self_msat_diff: i64 = 0;

		{
			// Take references explicitly so that we can hold multiple references to self.context.
			let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
			let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;
			let expecting_peer_commitment_signed = &mut self.context.expecting_peer_commitment_signed;

			// We really shouldn't have two passes here, but retain gives a non-mutable ref (Rust bug)
			pending_inbound_htlcs.retain(|htlc| {
				if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
					log_trace!(logger, " ...removing inbound LocalRemoved {}", &htlc.payment_hash);
					if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
						value_to_self_msat_diff += htlc.amount_msat as i64;
					}
					*expecting_peer_commitment_signed = true;
					false
				} else { true }
			});
			pending_outbound_htlcs.retain(|htlc| {
				if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
					log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", &htlc.payment_hash);
					if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
						revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
					} else {
						finalized_claimed_htlcs.push(htlc.source.clone());
						// They fulfilled, so we sent them money
						value_to_self_msat_diff -= htlc.amount_msat as i64;
					}
					false
				} else { true }
			});
			for htlc in pending_inbound_htlcs.iter_mut() {
				let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
					true
				} else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
					true
				} else { false };
				if swap {
					let mut state = InboundHTLCState::Committed;
					mem::swap(&mut state, &mut htlc.state);

					if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
						log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
						htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
						require_commitment = true;
					} else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
						match forward_info {
							PendingHTLCStatus::Fail(fail_msg) => {
								log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash);
								require_commitment = true;
								match fail_msg {
									HTLCFailureMsg::Relay(msg) => {
										htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
										update_fail_htlcs.push(msg)
									},
									HTLCFailureMsg::Malformed(msg) => {
										htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
										update_fail_malformed_htlcs.push(msg)
									},
								}
							},
							PendingHTLCStatus::Forward(forward_info) => {
								log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash);
								to_forward_infos.push((forward_info, htlc.htlc_id));
								htlc.state = InboundHTLCState::Committed;
							}
						}
					}
				}
			}
			for htlc in pending_outbound_htlcs.iter_mut() {
				if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
					log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", &htlc.payment_hash);
					htlc.state = OutboundHTLCState::Committed;
					*expecting_peer_commitment_signed = true;
				}
				if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
					log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
					// Grab the preimage, if it exists, instead of cloning
					let mut reason = OutboundHTLCOutcome::Success(None);
					mem::swap(outcome, &mut reason);
					htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
					require_commitment = true;
				}
			}
		}
		self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;
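		// Sign convention for the diff applied above: a fulfilled *inbound* HTLC adds its
		// amount to our balance (we learned the preimage and keep the funds), while a
		// fulfilled *outbound* HTLC subtracts it (our counterparty claimed the funds).
		// Failed HTLCs simply return to the payer's balance and need no adjustment here.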
		if let Some((feerate, update_state)) = self.context.pending_update_fee {
			match update_state {
				FeeUpdateState::Outbound => {
					debug_assert!(self.context.is_outbound());
					log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
					self.context.feerate_per_kw = feerate;
					self.context.pending_update_fee = None;
					self.context.expecting_peer_commitment_signed = true;
				},
				FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); },
				FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
					debug_assert!(!self.context.is_outbound());
					log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
					require_commitment = true;
					self.context.feerate_per_kw = feerate;
					self.context.pending_update_fee = None;
				},
			}
		}
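		// Fee updates follow the same two-phase dance as HTLCs: an update_fee only becomes
		// the channel's feerate once it has been included in a signed commitment and that
		// commitment has been revoked, which is why only the AwaitingRemoteRevokeToAnnounce
		// state is promoted to Committed here while RemoteAnnounced remains pending.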
		let release_monitor = self.context.blocked_monitor_updates.is_empty() && !hold_mon_update;
		let release_state_str =
			if hold_mon_update { "Holding" } else if release_monitor { "Releasing" } else { "Blocked" };
		macro_rules! return_with_htlcs_to_fail {
			($htlcs_to_fail: expr) => {
				if !release_monitor {
					self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
						update: monitor_update,
					});
					return Ok(($htlcs_to_fail, None));
				} else {
					return Ok(($htlcs_to_fail, Some(monitor_update)));
				}
			}
		}

		if self.context.channel_state.is_monitor_update_in_progress() {
			// We can't actually generate a new commitment transaction (incl by freeing holding
			// cells) while we can't update the monitor, so we just return what we have.
			if require_commitment {
				self.context.monitor_pending_commitment_signed = true;
				// When the monitor updating is restored we'll call
				// get_last_commitment_update_for_send(), which does not update state, but we're
				// definitely now awaiting a remote revoke before we can step forward any more, so
				// set it here.
				let mut additional_update = self.build_commitment_no_status_check(logger);
				// build_commitment_no_status_check may bump latest_monitor_id but we want them to be
				// strictly increasing by one, so decrement it here.
				self.context.latest_monitor_update_id = monitor_update.update_id;
				monitor_update.updates.append(&mut additional_update.updates);
			}
			self.context.monitor_pending_forwards.append(&mut to_forward_infos);
			self.context.monitor_pending_failures.append(&mut revoked_htlcs);
			self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
			log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", &self.context.channel_id());
			return_with_htlcs_to_fail!(Vec::new());
		}

		match self.free_holding_cell_htlcs(fee_estimator, logger) {
			(Some(mut additional_update), htlcs_to_fail) => {
				// free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be
				// strictly increasing by one, so decrement it here.
				self.context.latest_monitor_update_id = monitor_update.update_id;
				monitor_update.updates.append(&mut additional_update.updates);

				log_debug!(logger, "Received a valid revoke_and_ack for channel {} with holding cell HTLCs freed. {} monitor update.",
					&self.context.channel_id(), release_state_str);

				self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
				return_with_htlcs_to_fail!(htlcs_to_fail);
			},
			(None, htlcs_to_fail) => {
				if require_commitment {
					let mut additional_update = self.build_commitment_no_status_check(logger);

					// build_commitment_no_status_check may bump latest_monitor_id but we want them to be
					// strictly increasing by one, so decrement it here.
					self.context.latest_monitor_update_id = monitor_update.update_id;
					monitor_update.updates.append(&mut additional_update.updates);

					log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed. {} monitor update.",
						&self.context.channel_id(),
						update_fail_htlcs.len() + update_fail_malformed_htlcs.len(),
						release_state_str);

					self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
					return_with_htlcs_to_fail!(htlcs_to_fail);
				} else {
					log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary. {} monitor update.",
						&self.context.channel_id(), release_state_str);

					self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
					return_with_htlcs_to_fail!(htlcs_to_fail);
				}
			}
		}
	}
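	// Taken together, revoke_and_ack above replies in one of three ways: if a monitor
	// update is already in flight we queue everything and reply once it completes; if the
	// holding cell had updates (or a commitment is otherwise required) we respond with a
	// fresh commitment_signed; and if nothing needs signing we simply record the revocation.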
	/// Queues up an outbound update fee by placing it in the holding cell. You should call
	/// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
	/// commitment update.
	pub fn queue_update_fee<F: Deref, L: Deref>(&mut self, feerate_per_kw: u32,
		fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
	where F::Target: FeeEstimator, L::Target: Logger
	{
		let msg_opt = self.send_update_fee(feerate_per_kw, true, fee_estimator, logger);
		assert!(msg_opt.is_none(), "We forced holding cell?");
	}
	/// Adds a pending update to this channel. See the doc for send_htlc for
	/// further details on when the return value may be `None`.
	/// If our balance is too low to cover the cost of the next commitment transaction at the
	/// new feerate, the update is cancelled.
	///
	/// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
	/// [`Channel`] if `force_holding_cell` is false.
	fn send_update_fee<F: Deref, L: Deref>(
		&mut self, feerate_per_kw: u32, mut force_holding_cell: bool,
		fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
	) -> Option<msgs::UpdateFee>
	where F::Target: FeeEstimator, L::Target: Logger
	{
		if !self.context.is_outbound() {
			panic!("Cannot send fee from inbound channel");
		}
		if !self.context.is_usable() {
			panic!("Cannot update fee until channel is fully established and we haven't started shutting down");
		}
		if !self.context.is_live() {
			panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
		}

		// Before proposing a feerate update, check that we can actually afford the new fee.
		let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
		let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
		let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
		let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
		let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000;
		let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
		if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
			//TODO: auto-close after a number of failures?
			log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
			return None;
		}

		// Note, we evaluate pending htlc "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
		let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
		let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
		let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
		if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
			log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
			return None;
		}
		if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
			log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
			return None;
		}

		if self.context.channel_state.is_awaiting_remote_revoke() || self.context.channel_state.is_monitor_update_in_progress() {
			force_holding_cell = true;
		}

		if force_holding_cell {
			self.context.holding_cell_update_fee = Some(feerate_per_kw);
			return None;
		}

		debug_assert!(self.context.pending_update_fee.is_none());
		self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));

		Some(msgs::UpdateFee {
			channel_id: self.context.channel_id,
			feerate_per_kw,
		})
	}
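	// Rough affordability arithmetic behind the check above (per BOLT 3, non-anchor
	// weights): a commitment transaction weighs about 724 weight units plus 172 per
	// untrimmed HTLC, and its fee is feerate_per_kw * weight / 1000. So at 2,500 sat/kW
	// with two untrimmed HTLCs the fee is 2_500 * (724 + 2 * 172) / 1000 = 2_670 sats,
	// and the funder must still retain the full channel reserve on top of that.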
	/// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
	/// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be
	/// resent.
	///
	/// No further message handling calls may be made until a channel_reestablish dance has
	/// completed.
	///
	/// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately.
	pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger {
		assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
		if self.context.channel_state.is_pre_funded_state() {
			return Err(())
		}

		if self.context.channel_state.is_peer_disconnected() {
			// While the below code should be idempotent, it's simpler to just return early, as
			// redundant disconnect events can fire, though they should be rare.
			return Ok(());
		}

		if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
			self.context.announcement_sigs_state = AnnouncementSigsState::NotSent;
		}

		// Upon reconnect we have to start the closing_signed dance over, but shutdown messages
		// will be retransmitted.
		self.context.last_sent_closing_fee = None;
		self.context.pending_counterparty_closing_signed = None;
		self.context.closing_fee_limits = None;

		let mut inbound_drop_count = 0;
		self.context.pending_inbound_htlcs.retain(|htlc| {
			match htlc.state {
				InboundHTLCState::RemoteAnnounced(_) => {
					// They sent us an update_add_htlc but we never got the commitment_signed.
					// We'll tell them what commitment_signed we're expecting next and they'll drop
					// this HTLC accordingly.
					inbound_drop_count += 1;
					false
				},
				InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
					// We received a commitment_signed updating this HTLC and (at least hopefully)
					// sent a revoke_and_ack (which we can re-transmit) and have heard nothing
					// in response to it yet, so don't touch it.
					true
				},
				InboundHTLCState::Committed => true,
				InboundHTLCState::LocalRemoved(_) => {
					// We (hopefully) sent a commitment_signed updating this HTLC (which we can
					// re-transmit if needed) and they may have even sent a revoke_and_ack back
					// (that we missed). Keep this around for now and if they tell us they missed
					// the commitment_signed we can re-transmit the update then.
					true
				},
			}
		});
		self.context.next_counterparty_htlc_id -= inbound_drop_count;

		if let Some((_, update_state)) = self.context.pending_update_fee {
			if update_state == FeeUpdateState::RemoteAnnounced {
				debug_assert!(!self.context.is_outbound());
				self.context.pending_update_fee = None;
			}
		}

		for htlc in self.context.pending_outbound_htlcs.iter_mut() {
			if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
				// They sent us an update to remove this but haven't yet sent the corresponding
				// commitment_signed, we need to move it back to Committed and they can re-send
				// the update upon reconnection.
				htlc.state = OutboundHTLCState::Committed;
			}
		}

		self.context.sent_message_awaiting_response = None;

		self.context.channel_state.set_peer_disconnected();
		log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id());
		Ok(())
	}
	/// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
	/// This must be called before we return the [`ChannelMonitorUpdate`] back to the
	/// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
	/// update completes (potentially immediately).
	/// The messages which were generated with the monitor update must *not* have been sent to the
	/// remote end, and must instead have been dropped. They will be regenerated when
	/// [`Self::monitor_updating_restored`] is called.
	///
	/// [`ChannelManager`]: super::channelmanager::ChannelManager
	/// [`chain::Watch`]: crate::chain::Watch
	/// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
	fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
		resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
		mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
		mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
	) {
		self.context.monitor_pending_revoke_and_ack |= resend_raa;
		self.context.monitor_pending_commitment_signed |= resend_commitment;
		self.context.monitor_pending_channel_ready |= resend_channel_ready;
		self.context.monitor_pending_forwards.append(&mut pending_forwards);
		self.context.monitor_pending_failures.append(&mut pending_fails);
		self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
		self.context.channel_state.set_monitor_update_in_progress();
	}
	/// Indicates that the latest ChannelMonitor update has been committed by the client
	/// successfully and we should restore normal operation. Returns messages which should be sent
	/// to the remote side.
	pub fn monitor_updating_restored<L: Deref, NS: Deref>(
		&mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash,
		user_config: &UserConfig, best_block_height: u32
	) -> MonitorRestoreUpdates
	where
		L::Target: Logger,
		NS::Target: NodeSigner
	{
		assert!(self.context.channel_state.is_monitor_update_in_progress());
		self.context.channel_state.clear_monitor_update_in_progress();

		// If we're past (or at) the AwaitingChannelReady stage on an outbound channel, try to
		// (re-)broadcast the funding transaction as we may have declined to broadcast it when we
		// first received the funding_signed.
		let mut funding_broadcastable =
			if self.context.is_outbound() &&
				matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH)) ||
				matches!(self.context.channel_state, ChannelState::ChannelReady(_))
			{
				self.context.funding_transaction.take()
			} else { None };
		// That said, if the funding transaction is already confirmed (ie we're active with a
		// minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
		if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && self.context.minimum_depth != Some(0) {
			funding_broadcastable = None;
		}

		// We will never broadcast the funding transaction when we're in MonitorUpdateInProgress
		// (and we assume the user never directly broadcasts the funding transaction and waits for
		// us to do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
		// * an inbound channel that failed to persist the monitor on funding_created and we got
		//   the funding transaction confirmed before the monitor was persisted, or
		// * a 0-conf channel and intended to send the channel_ready before any broadcast at all.
		let channel_ready = if self.context.monitor_pending_channel_ready {
			assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
				"Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
			self.context.monitor_pending_channel_ready = false;
			let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
			Some(msgs::ChannelReady {
				channel_id: self.context.channel_id(),
				next_per_commitment_point,
				short_channel_id_alias: Some(self.context.outbound_scid_alias),
			})
		} else { None };

		let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger);

		let mut accepted_htlcs = Vec::new();
		mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards);
		let mut failed_htlcs = Vec::new();
		mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
		let mut finalized_claimed_htlcs = Vec::new();
		mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);

		if self.context.channel_state.is_peer_disconnected() {
			self.context.monitor_pending_revoke_and_ack = false;
			self.context.monitor_pending_commitment_signed = false;
			return MonitorRestoreUpdates {
				raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
				accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
			};
		}

		let raa = if self.context.monitor_pending_revoke_and_ack {
			Some(self.get_last_revoke_and_ack())
		} else { None };
		let commitment_update = if self.context.monitor_pending_commitment_signed {
			self.get_last_commitment_update_for_send(logger).ok()
		} else { None };
		if commitment_update.is_some() {
			self.mark_awaiting_response();
		}

		self.context.monitor_pending_revoke_and_ack = false;
		self.context.monitor_pending_commitment_signed = false;
		let order = self.context.resend_order.clone();
		log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
			&self.context.channel_id(), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
			if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
			match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
		MonitorRestoreUpdates {
			raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
		}
	}
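	// Note on `order` above: when both a commitment_signed and a revoke_and_ack must be
	// resent, they must go out in the same order they were originally sent, since the peer
	// processes them sequentially; resend_order records which came first.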
	pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
		where F::Target: FeeEstimator, L::Target: Logger
	{
		if self.context.is_outbound() {
			return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
		}
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
		}
		Channel::<SP>::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;

		self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
		self.context.update_time_counter += 1;
		// Check that we won't be pushed over our dust exposure limit by the feerate increase.
		if !self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
			let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
			let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
			let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
			let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
			let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
			if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
				return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
					msg.feerate_per_kw, holder_tx_dust_exposure)));
			}
			if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
				return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
					msg.feerate_per_kw, counterparty_tx_dust_exposure)));
			}
		}
		Ok(())
	}
	/// Indicates that the signer may have some signatures for us, so we should retry if we're
	/// blocked.
	#[cfg(async_signing)]
	pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger {
		let commitment_update = if self.context.signer_pending_commitment_update {
			self.get_last_commitment_update_for_send(logger).ok()
		} else { None };
		let funding_signed = if self.context.signer_pending_funding && !self.context.is_outbound() {
			self.context.get_funding_signed_msg(logger).1
		} else { None };
		let channel_ready = if funding_signed.is_some() {
			self.check_get_channel_ready(0)
		} else { None };

		log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed and {} channel_ready",
			if commitment_update.is_some() { "a" } else { "no" },
			if funding_signed.is_some() { "a" } else { "no" },
			if channel_ready.is_some() { "a" } else { "no" });

		SignerResumeUpdates {
			commitment_update,
			funding_signed,
			channel_ready,
		}
	}
	fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
		let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
		let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
		msgs::RevokeAndACK {
			channel_id: self.context.channel_id,
			per_commitment_secret,
			next_per_commitment_point,
			#[cfg(taproot)]
			next_local_nonce: None,
		}
	}
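	// Since holder commitment numbers also count down, `cur + 1` above is the commitment
	// we most recently received a signature for, and `cur + 2` is the one before it, i.e.
	// the state this RAA revokes, while the point at `cur` commits to the next state our
	// counterparty may sign for us.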
	/// Gets the last commitment update for immediate sending to our peer.
	fn get_last_commitment_update_for_send<L: Deref>(&mut self, logger: &L) -> Result<msgs::CommitmentUpdate, ()> where L::Target: Logger {
		let mut update_add_htlcs = Vec::new();
		let mut update_fulfill_htlcs = Vec::new();
		let mut update_fail_htlcs = Vec::new();
		let mut update_fail_malformed_htlcs = Vec::new();

		for htlc in self.context.pending_outbound_htlcs.iter() {
			if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
				update_add_htlcs.push(msgs::UpdateAddHTLC {
					channel_id: self.context.channel_id(),
					htlc_id: htlc.htlc_id,
					amount_msat: htlc.amount_msat,
					payment_hash: htlc.payment_hash,
					cltv_expiry: htlc.cltv_expiry,
					onion_routing_packet: (**onion_packet).clone(),
					skimmed_fee_msat: htlc.skimmed_fee_msat,
					blinding_point: htlc.blinding_point,
				});
			}
		}

		for htlc in self.context.pending_inbound_htlcs.iter() {
			if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
				match reason {
					&InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
						update_fail_htlcs.push(msgs::UpdateFailHTLC {
							channel_id: self.context.channel_id(),
							htlc_id: htlc.htlc_id,
							reason: err_packet.clone()
						});
					},
					&InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
						update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
							channel_id: self.context.channel_id(),
							htlc_id: htlc.htlc_id,
							sha256_of_onion: sha256_of_onion.clone(),
							failure_code: failure_code.clone(),
						});
					},
					&InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
						update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
							channel_id: self.context.channel_id(),
							htlc_id: htlc.htlc_id,
							payment_preimage: payment_preimage.clone(),
						});
					},
				}
			}
		}

		let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() {
			Some(msgs::UpdateFee {
				channel_id: self.context.channel_id(),
				feerate_per_kw: self.context.pending_update_fee.unwrap().0,
			})
		} else { None };

		log_trace!(logger, "Regenerating latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
			&self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" },
			update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
		let commitment_signed = if let Ok(update) = self.send_commitment_no_state_update(logger).map(|(cu, _)| cu) {
			if self.context.signer_pending_commitment_update {
				log_trace!(logger, "Commitment update generated: clearing signer_pending_commitment_update");
				self.context.signer_pending_commitment_update = false;
			}
			update
		} else {
			#[cfg(not(async_signing))] {
				panic!("Failed to get signature for new commitment state");
			}
			#[cfg(async_signing)] {
				if !self.context.signer_pending_commitment_update {
					log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
					self.context.signer_pending_commitment_update = true;
				}
				return Err(());
			}
		};
		Ok(msgs::CommitmentUpdate {
			update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
			commitment_signed,
		})
	}
	/// Gets the `Shutdown` message we should send our peer on reconnect, if any.
	pub fn get_outbound_shutdown(&self) -> Option<msgs::Shutdown> {
		if self.context.channel_state.is_local_shutdown_sent() {
			assert!(self.context.shutdown_scriptpubkey.is_some());
			Some(msgs::Shutdown {
				channel_id: self.context.channel_id,
				scriptpubkey: self.get_closing_scriptpubkey(),
			})
		} else { None }
	}
	/// May panic if some calls other than message-handling calls (which will all Err immediately)
	/// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
	///
	/// Some links printed in log lines are included here to check them during build (when run with
	/// `cargo doc --document-private-items`):
	/// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
	/// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
	pub fn channel_reestablish<L: Deref, NS: Deref>(
		&mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
		chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock
	) -> Result<ReestablishResponses, ChannelError>
	where
		L::Target: Logger,
		NS::Target: NodeSigner
	{
		if !self.context.channel_state.is_peer_disconnected() {
			// While BOLT 2 doesn't indicate explicitly we should error this channel here, it
			// almost certainly indicates we are going to end up out-of-sync in some way, so we
			// just close here instead of trying to recover.
			return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
		}

		if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
			msg.next_local_commitment_number == 0 {
			return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned()));
		}

		let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
		if msg.next_remote_commitment_number > 0 {
			let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
			let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
				.map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
			if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
				return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
			}
			if msg.next_remote_commitment_number > our_commitment_transaction {
				macro_rules! log_and_panic {
					($err_msg: expr) => {
						log_error!(logger, $err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
						panic!($err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
					}
				}
				log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
					This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
					More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
					If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
					ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
					ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
					Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
					See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
			}
		}

		// Before we change the state of the channel, we check if the peer is sending a very old
		// commitment transaction number, if yes we send a warning message.
		if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
			return Err(ChannelError::Warn(format!(
				"Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)",
				msg.next_remote_commitment_number,
				our_commitment_transaction
			)));
		}

		// Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
		// remaining cases either succeed or ErrorMessage-fail).
		self.context.channel_state.clear_peer_disconnected();
		self.context.sent_message_awaiting_response = None;

		let shutdown_msg = self.get_outbound_shutdown();

		let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger);

		if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(_)) {
			// If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
			if !self.context.channel_state.is_our_channel_ready() ||
					self.context.channel_state.is_monitor_update_in_progress() {
				if msg.next_remote_commitment_number != 0 {
					return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
				}
				// Short circuit the whole handler as there is nothing we can resend them
				return Ok(ReestablishResponses {
					channel_ready: None,
					raa: None, commitment_update: None,
					order: RAACommitmentOrder::CommitmentFirst,
					shutdown_msg, announcement_sigs,
				});
			}

			// We have OurChannelReady set!
			let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
			return Ok(ReestablishResponses {
				channel_ready: Some(msgs::ChannelReady {
					channel_id: self.context.channel_id(),
					next_per_commitment_point,
					short_channel_id_alias: Some(self.context.outbound_scid_alias),
				}),
				raa: None, commitment_update: None,
				order: RAACommitmentOrder::CommitmentFirst,
				shutdown_msg, announcement_sigs,
			});
		}

		let required_revoke = if msg.next_remote_commitment_number == our_commitment_transaction {
			// Remote isn't waiting on any RevokeAndACK from us!
			// Note that if we need to repeat our ChannelReady we'll do that in the next if block.
			None
		} else if msg.next_remote_commitment_number + 1 == our_commitment_transaction {
			if self.context.channel_state.is_monitor_update_in_progress() {
				self.context.monitor_pending_revoke_and_ack = true;
				None
			} else {
				Some(self.get_last_revoke_and_ack())
			}
		} else {
			debug_assert!(false, "All values should have been handled in the four cases above");
			return Err(ChannelError::Close(format!(
				"Peer attempted to reestablish channel expecting a future local commitment transaction: {} (received) vs {} (expected)",
				msg.next_remote_commitment_number,
				our_commitment_transaction
			)));
		};

		// We increment cur_counterparty_commitment_transaction_number only upon receipt of
		// revoke_and_ack, not on sending commitment_signed, so we add one if we have
		// AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
		// the corresponding revoke_and_ack back yet.
		let is_awaiting_remote_revoke = self.context.channel_state.is_awaiting_remote_revoke();
		if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
			self.mark_awaiting_response();
		}

		let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };
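		// The subtraction above converts our countdown index into the count-up numbering
		// used on the wire. For example, if the counterparty's current commitment is their
		// fifth (cur = 2^48 - 1 - 5) and we have a commitment_signed in flight awaiting its
		// revoke_and_ack, they should next expect commitment number 6 from us.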
		let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 {
			// We should never have to worry about MonitorUpdateInProgress resending ChannelReady
			let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
			Some(msgs::ChannelReady {
				channel_id: self.context.channel_id(),
				next_per_commitment_point,
				short_channel_id_alias: Some(self.context.outbound_scid_alias),
			})
		} else { None };

		if msg.next_local_commitment_number == next_counterparty_commitment_number {
			if required_revoke.is_some() {
				log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id());
			} else {
				log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id());
			}

			Ok(ReestablishResponses {
				channel_ready, shutdown_msg, announcement_sigs,
				raa: required_revoke,
				commitment_update: None,
				order: self.context.resend_order.clone(),
			})
		} else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
			if required_revoke.is_some() {
				log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id());
			} else {
				log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id());
			}

			if self.context.channel_state.is_monitor_update_in_progress() {
				self.context.monitor_pending_commitment_signed = true;
				Ok(ReestablishResponses {
					channel_ready, shutdown_msg, announcement_sigs,
					commitment_update: None, raa: None,
					order: self.context.resend_order.clone(),
				})
			} else {
				Ok(ReestablishResponses {
					channel_ready, shutdown_msg, announcement_sigs,
					raa: required_revoke,
					commitment_update: self.get_last_commitment_update_for_send(logger).ok(),
					order: self.context.resend_order.clone(),
				})
			}
		} else if msg.next_local_commitment_number < next_counterparty_commitment_number {
			Err(ChannelError::Close(format!(
				"Peer attempted to reestablish channel with a very old remote commitment transaction: {} (received) vs {} (expected)",
				msg.next_local_commitment_number,
				next_counterparty_commitment_number,
			)))
		} else {
			Err(ChannelError::Close(format!(
				"Peer attempted to reestablish channel with a future remote commitment transaction: {} (received) vs {} (expected)",
				msg.next_local_commitment_number,
				next_counterparty_commitment_number,
			)))
		}
	}
	/// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole
	/// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart,
	/// at which point they will be recalculated.
	fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>)
		-> (u64, u64)
		where F::Target: FeeEstimator
	{
		if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); }

		// Propose a range from our current Background feerate to our Normal feerate plus our
		// force_close_avoidance_max_fee_satoshis.
		// If we fail to come to consensus, we'll have to force-close.
		let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::ChannelCloseMinimum);
		// Use NonAnchorChannelFee because this should be an estimate for a channel close
		// that we don't expect to need fee bumping
		let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
		let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() };

		// The spec requires that (when the channel does not have anchors) we only send absolute
		// channel fees no greater than the absolute channel fee on the current commitment
		// transaction. It's unclear *which* commitment transaction this refers to, and there isn't
		// a very good reason to apply such a limit in any case. We don't bother doing so, risking
		// some force-closure by old nodes, but we wanted to close the channel anyway.

		if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw {
			let min_feerate = if self.context.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) };
			proposed_feerate = cmp::max(proposed_feerate, min_feerate);
			proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate);
		}

		// Note that technically we could end up with a lower minimum fee if one side's balance is
		// below our dust limit, causing the output to disappear. We don't bother handling this
		// case, however, as this should only happen if a channel is closed before any (material)
		// payments have been made on it. This may cause slight fee overpayment and/or failure to
		// come to consensus with our counterparty on appropriate fees, however it should be a
		// relatively rare case. We can revisit this later, though note that in order to determine
		// if the funder's output is dust we have to know the absolute fee we're going to use.
		let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
		let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
		let proposed_max_total_fee_satoshis = if self.context.is_outbound() {
			// We always add force_close_avoidance_max_fee_satoshis to our normal
			// feerate-calculated fee, but allow the max to be overridden if we're using a
			// target feerate-calculated fee.
			cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.context.config.options.force_close_avoidance_max_fee_satoshis,
				proposed_max_feerate as u64 * tx_weight / 1000)
		} else {
			self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000
		};

		self.context.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis));
		self.context.closing_fee_limits.clone().unwrap()
	}
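	// Worked example of the fee math above: with proposed_feerate = 1_000 sat/kW and a
	// closing transaction weight of, say, 700 WU, the initial proposal is
	// 1_000 * 700 / 1000 = 700 sats; as the funder, the maximum would be the
	// normal-feerate fee plus force_close_avoidance_max_fee_satoshis (1,000 sats by
	// default in UserConfig).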
	/// Returns true if we're ready to commence the closing_signed negotiation phase. This is true
	/// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At
	/// this point if we're the funder we should send the initial closing_signed, and in any case
	/// shutdown should complete within a reasonable timeframe.
	fn closing_negotiation_ready(&self) -> bool {
		self.context.closing_negotiation_ready()
	}

	/// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
	/// an Err if no progress is being made and the channel should be force-closed instead.
	/// Should be called on a one-minute timer.
	pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
		if self.closing_negotiation_ready() {
			if self.context.closing_signed_in_flight {
				return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
			} else {
				self.context.closing_signed_in_flight = true;
			}
		}
		Ok(())
	}
	pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
		&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
		-> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
		where F::Target: FeeEstimator, L::Target: Logger
	{
		// If we're waiting on a monitor persistence, that implies we're also waiting to send some
		// message to our counterparty (probably a `revoke_and_ack`). In such a case, we shouldn't
		// initiate `closing_signed` negotiation until we're clear of all pending messages. Note
		// that closing_negotiation_ready checks this case (as well as a few others).
		if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
			return Ok((None, None, None));
		}

		if !self.context.is_outbound() {
			if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() {
				return self.closing_signed(fee_estimator, &msg);
			}
			return Ok((None, None, None));
		}

		// If we're waiting on a counterparty `commitment_signed` to clear some updates from our
		// local commitment transaction, we can't yet initiate `closing_signed` negotiation.
		if self.context.expecting_peer_commitment_signed {
			return Ok((None, None, None));
		}

		let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);

		assert!(self.context.shutdown_scriptpubkey.is_some());
		let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false);
		log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)",
			our_min_fee, our_max_fee, total_fee_satoshis);

		match &self.context.holder_signer {
			ChannelSignerType::Ecdsa(ecdsa) => {
				let sig = ecdsa
					.sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
					.map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;

				self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
				Ok((Some(msgs::ClosingSigned {
					channel_id: self.context.channel_id,
					fee_satoshis: total_fee_satoshis,
					signature: sig,
					fee_range: Some(msgs::ClosingSignedFeeRange {
						min_fee_satoshis: our_min_fee,
						max_fee_satoshis: our_max_fee,
					}),
				}), None, None))
			},
			// TODO (taproot|arik)
			#[cfg(taproot)]
			_ => todo!()
		}
	}
	// Marks a channel as waiting for a response from the counterparty. If it's not received
	// [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] after sending our own to them, then we'll attempt
	// to disconnect them.
	fn mark_awaiting_response(&mut self) {
		self.context.sent_message_awaiting_response = Some(0);
	}

	/// Determines whether we should disconnect the counterparty due to not receiving a response
	/// within our expected timeframe.
	///
	/// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
	pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool {
		let ticks_elapsed = if let Some(ticks_elapsed) = self.context.sent_message_awaiting_response.as_mut() {
			ticks_elapsed
		} else {
			// Don't disconnect when we're not waiting on a response.
			return false;
		};
		*ticks_elapsed += 1;
		*ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
	}
	pub fn shutdown(
		&mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
	) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
	{
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
		}
		if self.context.channel_state.is_pre_funded_state() {
			// Spec says we should fail the connection, not the channel, but that's nonsense, there
			// are plenty of reasons you may want to fail a channel pre-funding, and spec says you
			// can do that via error message without getting a connection fail anyway...
			return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
		}
		for htlc in self.context.pending_inbound_htlcs.iter() {
			if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
				return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
			}
		}
		assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));

		if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
			return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_hex_string())));
		}

		if self.context.counterparty_shutdown_scriptpubkey.is_some() {
			if Some(&msg.scriptpubkey) != self.context.counterparty_shutdown_scriptpubkey.as_ref() {
				return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_hex_string())));
			}
		} else {
			self.context.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
		}

		// If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
		// immediately after the commitment dance, but we can send a Shutdown because we won't send
		// any further commitment updates after we set LocalShutdownSent.
		let send_shutdown = !self.context.channel_state.is_local_shutdown_sent();

		let update_shutdown_script = match self.context.shutdown_scriptpubkey {
			Some(_) => false,
			None => {
				assert!(send_shutdown);
				let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
					Ok(scriptpubkey) => scriptpubkey,
					Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
				};
				if !shutdown_scriptpubkey.is_compatible(their_features) {
					return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
				}
				self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
				true
			},
		};
		// From here on out, we may not fail!

		self.context.channel_state.set_remote_shutdown_sent();
		self.context.update_time_counter += 1;

		let monitor_update = if update_shutdown_script {
			self.context.latest_monitor_update_id += 1;
			let monitor_update = ChannelMonitorUpdate {
				update_id: self.context.latest_monitor_update_id,
				counterparty_node_id: Some(self.context.counterparty_node_id),
				updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
					scriptpubkey: self.get_closing_scriptpubkey(),
				}],
			};
			self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
			self.push_ret_blockable_mon_update(monitor_update)
		} else { None };
		let shutdown = if send_shutdown {
			Some(msgs::Shutdown {
				channel_id: self.context.channel_id,
				scriptpubkey: self.get_closing_scriptpubkey(),
			})
		} else { None };

		// We can't send our shutdown until we've committed all of our pending HTLCs, but the
		// remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
		// cell HTLCs and return them to fail the payment.
		self.context.holding_cell_update_fee = None;
		let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
		self.context.holding_cell_htlc_updates.retain(|htlc_update| {
			match htlc_update {
				&HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
					dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
					false
				},
				_ => true
			}
		});

		self.context.channel_state.set_local_shutdown_sent();
		self.context.update_time_counter += 1;

		Ok((shutdown, monitor_update, dropped_outbound_htlcs))
	}
	fn build_signed_closing_transaction(&self, closing_tx: &ClosingTransaction, counterparty_sig: &Signature, sig: &Signature) -> Transaction {
		let mut tx = closing_tx.trust().built_transaction().clone();

		tx.input[0].witness.push(Vec::new()); // First is the multisig dummy

		let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize();
		let counterparty_funding_key = self.context.counterparty_funding_pubkey().serialize();
		let mut holder_sig = sig.serialize_der().to_vec();
		holder_sig.push(EcdsaSighashType::All as u8);
		let mut cp_sig = counterparty_sig.serialize_der().to_vec();
		cp_sig.push(EcdsaSighashType::All as u8);
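		// Per BOLT #3, the funding output's 2-of-2 redeemscript orders the funding pubkeys
		// lexicographically, and the witness must supply each signature in the slot matching its
		// pubkey's position. The comparison below mirrors that ordering so our signature lands in
		// the correct slot.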
		if funding_key[..] < counterparty_funding_key[..] {
			tx.input[0].witness.push(holder_sig);
			tx.input[0].witness.push(cp_sig);
		} else {
			tx.input[0].witness.push(cp_sig);
			tx.input[0].witness.push(holder_sig);
		}

		tx.input[0].witness.push(self.context.get_funding_redeemscript().into_bytes());
		tx
	}
	pub fn closing_signed<F: Deref>(
		&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned)
		-> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
		where F::Target: FeeEstimator
	{
		if !self.context.channel_state.is_both_sides_shutdown() {
			return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
		}
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
		}
		if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
			return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
		}
		if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
			return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
		}

		if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() {
			return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
		}

		if self.context.channel_state.is_monitor_update_in_progress() {
			self.context.pending_counterparty_closing_signed = Some(msg.clone());
			return Ok((None, None, None));
		}

		let funding_redeemscript = self.context.get_funding_redeemscript();
		let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
		if used_total_fee != msg.fee_satoshis {
			return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
		}
		let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);

		match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
			Ok(_) => {},
			Err(_e) => {
				// The remote end may have decided to revoke their output due to inconsistent dust
				// limits, so check for that case by re-checking the signature here.
				closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0;
				let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
				secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.context.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
			},
		};

		for outp in closing_tx.trust().built_transaction().output.iter() {
			if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
				return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
			}
		}

		assert!(self.context.shutdown_scriptpubkey.is_some());
		if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
			if last_fee == msg.fee_satoshis {
				let shutdown_result = ShutdownResult {
					monitor_update: None,
					dropped_outbound_htlcs: Vec::new(),
					unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
					channel_id: self.context.channel_id,
					counterparty_node_id: self.context.counterparty_node_id,
				};
				let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
				self.context.channel_state = ChannelState::ShutdownComplete;
				self.context.update_time_counter += 1;
				return Ok((None, Some(tx), Some(shutdown_result)));
			}
		}

		let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
		macro_rules! propose_fee {
			($new_fee: expr) => {
				let (closing_tx, used_fee) = if $new_fee == msg.fee_satoshis {
					(closing_tx, $new_fee)
				} else {
					self.build_closing_transaction($new_fee, false)
				};

				return match &self.context.holder_signer {
					ChannelSignerType::Ecdsa(ecdsa) => {
						let sig = ecdsa
							.sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
							.map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
						let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
							let shutdown_result = ShutdownResult {
								monitor_update: None,
								dropped_outbound_htlcs: Vec::new(),
								unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
								channel_id: self.context.channel_id,
								counterparty_node_id: self.context.counterparty_node_id,
							};
							self.context.channel_state = ChannelState::ShutdownComplete;
							self.context.update_time_counter += 1;
							let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
							(Some(tx), Some(shutdown_result))
						} else {
							(None, None)
						};

						self.context.last_sent_closing_fee = Some((used_fee, sig.clone()));
						Ok((Some(msgs::ClosingSigned {
							channel_id: self.context.channel_id,
							fee_satoshis: used_fee,
							signature: sig,
							fee_range: Some(msgs::ClosingSignedFeeRange {
								min_fee_satoshis: our_min_fee,
								max_fee_satoshis: our_max_fee,
							}),
						}), signed_tx, shutdown_result))
					},
					// TODO (taproot|arik)
					#[cfg(taproot)]
					_ => todo!()
				}
			}
		}
		if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range {
			if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis {
				return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
			}
			if max_fee_satoshis < our_min_fee {
				return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee)));
			}
			if min_fee_satoshis > our_max_fee {
				return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee)));
			}

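			// At this point the two fee ranges overlap. Illustrative example (hypothetical
			// numbers): if we're the fundee with an acceptable range of [1_000, 5_000] sat and
			// the funder proposed 2_000 sat with a range of [500, 3_000] sat, we counter-propose
			// min(3_000, 5_000) = 3_000 sat below, since the funder pays the fee. Had they
			// proposed exactly 3_000 sat, we would instead sign and broadcast immediately.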
			if !self.context.is_outbound() {
				// They have to pay, so pick the highest fee in the overlapping range.
				// We should never set an upper bound aside from their full balance.
				debug_assert_eq!(our_max_fee, self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000);
				propose_fee!(cmp::min(max_fee_satoshis, our_max_fee));
			} else {
				if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee {
					return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
						msg.fee_satoshis, our_min_fee, our_max_fee)));
				}
				// The proposed fee is in our acceptable range, accept it and broadcast!
				propose_fee!(msg.fee_satoshis);
			}
		} else {
			// Old fee style negotiation. We don't bother to enforce whether they are complying
			// with the "making progress" requirements, we just comply and hope for the best.
			if let Some((last_fee, _)) = self.context.last_sent_closing_fee {
				if msg.fee_satoshis > last_fee {
					if msg.fee_satoshis < our_max_fee {
						propose_fee!(msg.fee_satoshis);
					} else if last_fee < our_max_fee {
						propose_fee!(our_max_fee);
					} else {
						return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
					}
				} else {
					if msg.fee_satoshis > our_min_fee {
						propose_fee!(msg.fee_satoshis);
					} else if last_fee > our_min_fee {
						propose_fee!(our_min_fee);
					} else {
						return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
					}
				}
			} else {
				if msg.fee_satoshis < our_min_fee {
					propose_fee!(our_min_fee);
				} else if msg.fee_satoshis > our_max_fee {
					propose_fee!(our_max_fee);
				} else {
					propose_fee!(msg.fee_satoshis);
				}
			}
		}
	}
	fn internal_htlc_satisfies_config(
		&self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig,
	) -> Result<(), (&'static str, u16)> {
		let fee = amt_to_forward.checked_mul(config.forwarding_fee_proportional_millionths as u64)
			.and_then(|prop_fee| (prop_fee / 1000000).checked_add(config.forwarding_fee_base_msat as u64));
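		// For example (illustrative values only): with forwarding_fee_base_msat = 1_000 and
		// forwarding_fee_proportional_millionths = 100, forwarding 1_000_000 msat costs
		// 1_000 + 1_000_000 * 100 / 1_000_000 = 1_100 msat.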
		if fee.is_none() || htlc.amount_msat < fee.unwrap() ||
			(htlc.amount_msat - fee.unwrap()) < amt_to_forward {
			return Err((
				"Prior hop has deviated from specified fee parameters or origin node has obsolete ones",
				0x1000 | 12, // fee_insufficient
			));
		}
		if (htlc.cltv_expiry as u64) < outgoing_cltv_value as u64 + config.cltv_expiry_delta as u64 {
			return Err((
				"Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
				0x1000 | 13, // incorrect_cltv_expiry
			));
		}
		Ok(())
	}
	/// Determines whether the parameters of an incoming HTLC to be forwarded satisfy the channel's
	/// [`ChannelConfig`]. This first looks at the channel's current [`ChannelConfig`], and if
	/// unsuccessful, falls back to the previous one if one exists.
	pub fn htlc_satisfies_config(
		&self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32,
	) -> Result<(), (&'static str, u16)> {
		self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.context.config())
			.or_else(|err| {
				if let Some(prev_config) = self.context.prev_config() {
					self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &prev_config)
				} else {
					Err(err)
				}
			})
	}
	pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
		self.context.cur_holder_commitment_transaction_number + 1
	}

	pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
		self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state.is_awaiting_remote_revoke() { 1 } else { 0 }
	}

	pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
		self.context.cur_counterparty_commitment_transaction_number + 2
	}

	pub fn get_signer(&self) -> &ChannelSignerType<SP> {
		&self.context.holder_signer
	}

	pub fn get_value_stat(&self) -> ChannelValueStat {
		ChannelValueStat {
			value_to_self_msat: self.context.value_to_self_msat,
			channel_value_msat: self.context.channel_value_satoshis * 1000,
			channel_reserve_msat: self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000,
			pending_outbound_htlcs_amount_msat: self.context.pending_outbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
			pending_inbound_htlcs_amount_msat: self.context.pending_inbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
			holding_cell_outbound_amount_msat: {
				let mut res = 0;
				for h in self.context.holding_cell_htlc_updates.iter() {
					match h {
						&HTLCUpdateAwaitingACK::AddHTLC{amount_msat, .. } => {
							res += amount_msat;
						},
						_ => {}
					}
				}
				res
			},
			counterparty_max_htlc_value_in_flight_msat: self.context.counterparty_max_htlc_value_in_flight_msat,
			counterparty_dust_limit_msat: self.context.counterparty_dust_limit_satoshis * 1000,
		}
	}
	/// Returns true if this channel has been marked as awaiting a monitor update to move forward.
	/// Allowed in any state (including after shutdown)
	pub fn is_awaiting_monitor_update(&self) -> bool {
		self.context.channel_state.is_monitor_update_in_progress()
	}

	/// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
	pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 {
		if self.context.blocked_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
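		// E.g. if updates with IDs [5, 6] are currently blocked, everything up to and including
		// ID 4 has already been released, so we report 4 here.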
		self.context.blocked_monitor_updates[0].update.update_id - 1
	}
	/// Returns the next blocked monitor update, if one exists, and a bool which indicates a
	/// further blocked monitor update exists after the next.
	pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(ChannelMonitorUpdate, bool)> {
		if self.context.blocked_monitor_updates.is_empty() { return None; }
		Some((self.context.blocked_monitor_updates.remove(0).update,
			!self.context.blocked_monitor_updates.is_empty()))
	}

	/// Pushes a new monitor update into our monitor update queue, returning it if it should be
	/// immediately given to the user for persisting or `None` if it should be held as blocked.
	fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
	-> Option<ChannelMonitorUpdate> {
		let release_monitor = self.context.blocked_monitor_updates.is_empty();
		if !release_monitor {
			self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
				update,
			});
			None
		} else {
			Some(update)
		}
	}

	pub fn blocked_monitor_updates_pending(&self) -> usize {
		self.context.blocked_monitor_updates.len()
	}
	/// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
	/// If the channel is outbound, this implies we have not yet broadcasted the funding
	/// transaction. If the channel is inbound, this implies simply that the channel has not
	/// advanced state.
	pub fn is_awaiting_initial_mon_persist(&self) -> bool {
		if !self.is_awaiting_monitor_update() { return false; }
		if matches!(
			self.context.channel_state, ChannelState::AwaitingChannelReady(flags)
			if (flags & !(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY | FundedStateFlags::PEER_DISCONNECTED | FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS | AwaitingChannelReadyFlags::WAITING_FOR_BATCH)).is_empty()
		) {
			// If we're not a 0conf channel, we'll be waiting on a monitor update with only
			// AwaitingChannelReady set, though our peer could have sent their channel_ready.
			debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0);
			return true;
		}
		if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
			self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
			// If we're a 0-conf channel, we'll move beyond AwaitingChannelReady immediately even while
			// waiting for the initial monitor persistence. Thus, we check if our commitment
			// transaction numbers have both been iterated only exactly once (for the
			// funding_signed), and we're awaiting monitor update.
			//
			// If we got here, we shouldn't have yet broadcasted the funding transaction (as the
			// only way to get an awaiting-monitor-update state during initial funding is if the
			// initial monitor persistence is still pending).
			//
			// Because deciding we're awaiting initial broadcast spuriously could result in
			// funds-loss (as we don't have a monitor, but have the funding transaction confirmed),
			// we hard-assert here, even in production builds.
			if self.context.is_outbound() { assert!(self.context.funding_transaction.is_some()); }
			assert!(self.context.monitor_pending_channel_ready);
			assert_eq!(self.context.latest_monitor_update_id, 0);
			return true;
		}
		false
	}
	/// Returns true if our channel_ready has been sent
	pub fn is_our_channel_ready(&self) -> bool {
		matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY)) ||
			matches!(self.context.channel_state, ChannelState::ChannelReady(_))
	}

	/// Returns true if our peer has either initiated or agreed to shut down the channel.
	pub fn received_shutdown(&self) -> bool {
		self.context.channel_state.is_remote_shutdown_sent()
	}

	/// Returns true if we either initiated or agreed to shut down the channel.
	pub fn sent_shutdown(&self) -> bool {
		self.context.channel_state.is_local_shutdown_sent()
	}

	/// Returns true if this channel is fully shut down. True here implies that no further actions
	/// may/will be taken on this channel, and thus this object should be freed. Any future changes
	/// will be handled appropriately by the chain monitor.
	pub fn is_shutdown(&self) -> bool {
		matches!(self.context.channel_state, ChannelState::ShutdownComplete)
	}

	pub fn channel_update_status(&self) -> ChannelUpdateStatus {
		self.context.channel_update_status
	}

	pub fn set_channel_update_status(&mut self, status: ChannelUpdateStatus) {
		self.context.update_time_counter += 1;
		self.context.channel_update_status = status;
	}
	fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
		// Called:
		//  * always when a new block/transactions are confirmed with the new height
		//  * when funding is signed with a height of 0
		if self.context.funding_tx_confirmation_height == 0 && self.context.minimum_depth != Some(0) {
			return None;
		}

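		// Confirmation counting is inclusive of the block containing the funding transaction: if
		// it confirmed at height H and the tip is also at height H, that is one confirmation,
		// hence the `+ 1` below.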
		let funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
		if funding_tx_confirmations <= 0 {
			self.context.funding_tx_confirmation_height = 0;
		}

		if funding_tx_confirmations < self.context.minimum_depth.unwrap_or(0) as i64 {
			return None;
		}

		// If we're still pending the signature on a funding transaction, then we're not ready to send a
		// channel_ready yet.
		if self.context.signer_pending_funding {
			return None;
		}

		// Note that we don't include ChannelState::WaitingForBatch as we don't want to send
		// channel_ready until the entire batch is ready.
		let need_commitment_update = if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if (f & !FundedStateFlags::ALL).is_empty()) {
			self.context.channel_state.set_our_channel_ready();
			true
		} else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f & !FundedStateFlags::ALL == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY) {
			self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
			self.context.update_time_counter += 1;
			true
		} else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f & !FundedStateFlags::ALL == AwaitingChannelReadyFlags::OUR_CHANNEL_READY) {
			// We got a reorg but not enough to trigger a force close, just ignore.
			false
		} else {
			if self.context.funding_tx_confirmation_height != 0 &&
				self.context.channel_state < ChannelState::ChannelReady(ChannelReadyFlags::new())
			{
				// We should never see a funding transaction on-chain until we've received
				// funding_signed (if we're an outbound channel), or seen funding_generated (if we're
				// an inbound channel - before that we have no known funding TXID). The fuzzer,
				// however, may do this and we shouldn't treat it as a bug.
				#[cfg(not(fuzzing))]
				panic!("Started confirming a channel in a state pre-AwaitingChannelReady: {}.\n\
					Do NOT broadcast a funding transaction manually - let LDK do it for you!",
					self.context.channel_state.to_u32());
			}
			// We got a reorg but not enough to trigger a force close, just ignore.
			false
		};

		if need_commitment_update {
			if !self.context.channel_state.is_monitor_update_in_progress() {
				if !self.context.channel_state.is_peer_disconnected() {
					let next_per_commitment_point =
						self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
					return Some(msgs::ChannelReady {
						channel_id: self.context.channel_id,
						next_per_commitment_point,
						short_channel_id_alias: Some(self.context.outbound_scid_alias),
					});
				}
			} else {
				self.context.monitor_pending_channel_ready = true;
			}
		}
		None
	}
	/// When a transaction is confirmed, we check whether it is or spends the funding transaction.
	/// In the first case, we store the confirmation height and calculate the short channel id.
	/// In the second, we simply return an Err indicating we need to be force-closed now.
	pub fn transactions_confirmed<NS: Deref, L: Deref>(
		&mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData,
		chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L
	) -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason>
	where
		NS::Target: NodeSigner,
		L::Target: Logger
	{
		let mut msgs = (None, None);
		if let Some(funding_txo) = self.context.get_funding_txo() {
			for &(index_in_block, tx) in txdata.iter() {
				// Check if the transaction is the expected funding transaction, and if it is,
				// check that it pays the right amount to the right script.
				if self.context.funding_tx_confirmation_height == 0 {
					if tx.txid() == funding_txo.txid {
						let txo_idx = funding_txo.index as usize;
						if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_v0_p2wsh() ||
								tx.output[txo_idx].value != self.context.channel_value_satoshis {
							if self.context.is_outbound() {
								// If we generated the funding transaction and it doesn't match what it
								// should, the client is really broken and we should just panic and
								// tell them off. That said, because hash collisions happen with high
								// probability in fuzzing mode, if we're fuzzing we just close the
								// channel and move on.
								#[cfg(not(fuzzing))]
								panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
							}
							self.context.update_time_counter += 1;
							let err_reason = "funding tx had wrong script/value or output index";
							return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() });
						} else {
							if self.context.is_outbound() {
								if !tx.is_coin_base() {
									for input in tx.input.iter() {
										if input.witness.is_empty() {
											// We generated a malleable funding transaction, implying we've
											// just exposed ourselves to funds loss to our counterparty.
											#[cfg(not(fuzzing))]
											panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
										}
									}
								}
							}
							self.context.funding_tx_confirmation_height = height;
							self.context.funding_tx_confirmed_in = Some(*block_hash);
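							// A short channel ID packs (block height, index within the block,
							// output index) into 64 bits: three bytes of height, three bytes of
							// transaction index and two bytes of output index, hence the limits
							// in the panic message below.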
							self.context.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
								Ok(scid) => Some(scid),
								Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
							}
						}
						// If this is a coinbase transaction and not a 0-conf channel
						// we should update our min_depth to 100 to handle coinbase maturity
						if tx.is_coin_base() &&
							self.context.minimum_depth.unwrap_or(0) > 0 &&
							self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
							self.context.minimum_depth = Some(COINBASE_MATURITY);
						}
					}
					// If we allow 1-conf funding, we may need to check for channel_ready here and
					// send it immediately instead of waiting for a best_block_updated call (which
					// may have already happened for this block).
					if let Some(channel_ready) = self.check_get_channel_ready(height) {
						log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
						let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger);
						msgs = (Some(channel_ready), announcement_sigs);
					}
				}
				for inp in tx.input.iter() {
					if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
						log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, &self.context.channel_id());
						return Err(ClosureReason::CommitmentTxConfirmed);
					}
				}
			}
		}
		Ok(msgs)
	}
	/// When a new block is connected, we check the height of the block against outbound holding
	/// cell HTLCs in case we need to give up on them prematurely and time them out. Everything
	/// else (e.g. commitment transaction broadcasts, HTLC transaction broadcasting, etc) is
	/// handled by the ChannelMonitor.
	///
	/// If we return Err, the channel may have been closed, at which point the standard
	/// requirements apply - no calls may be made except those explicitly stated to be allowed
	/// for this channel.
	///
	/// May return some HTLCs (and their payment_hash) which have timed out and should be failed
	/// back.
	pub fn best_block_updated<NS: Deref, L: Deref>(
		&mut self, height: u32, highest_header_time: u32, chain_hash: ChainHash,
		node_signer: &NS, user_config: &UserConfig, logger: &L
	) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
	where
		NS::Target: NodeSigner,
		L::Target: Logger
	{
		self.do_best_block_updated(height, highest_header_time, Some((chain_hash, node_signer, user_config)), logger)
	}
	fn do_best_block_updated<NS: Deref, L: Deref>(
		&mut self, height: u32, highest_header_time: u32,
		chain_node_signer: Option<(ChainHash, &NS, &UserConfig)>, logger: &L
	) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
	where
		NS::Target: NodeSigner,
		L::Target: Logger
	{
		let mut timed_out_htlcs = Vec::new();
		// This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
		// forward an HTLC when our counterparty should almost certainly just fail it for expiring
		// ~now.
		let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
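		// i.e. any holding-cell HTLC whose cltv_expiry falls within LATENCY_GRACE_PERIOD_BLOCKS
		// of the new tip is failed back now rather than forwarded, since the next hop would
		// almost certainly just reject it.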
		self.context.holding_cell_htlc_updates.retain(|htlc_update| {
			match htlc_update {
				&HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
					if *cltv_expiry <= unforwarded_htlc_cltv_limit {
						timed_out_htlcs.push((source.clone(), payment_hash.clone()));
						false
					} else { true }
				},
				_ => true
			}
		});

		self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);

		if let Some(channel_ready) = self.check_get_channel_ready(height) {
			let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
				self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
			} else { None };
			log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
			return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
		}

		if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
			self.context.channel_state.is_our_channel_ready() {
			let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
			if self.context.funding_tx_confirmation_height == 0 {
				// Note that check_get_channel_ready may reset funding_tx_confirmation_height to
				// zero if it has been reorged out, however in either case, our state flags
				// indicate we've already sent a channel_ready
				funding_tx_confirmations = 0;
			}

			// If we've sent channel_ready (or have both sent and received channel_ready), and
			// the funding transaction has become unconfirmed,
			// close the channel and hope we can get the latest state on chain (because presumably
			// the funding transaction is at least still in the mempool of most nodes).
			//
			// Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
			// 0-conf channel, but not doing so may lead to the
			// `ChannelManager::short_to_chan_info` map being inconsistent, so we currently have
			// to.
			if funding_tx_confirmations == 0 && self.context.funding_tx_confirmed_in.is_some() {
				let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
					self.context.minimum_depth.unwrap(), funding_tx_confirmations);
				return Err(ClosureReason::ProcessingError { err: err_reason });
			}
		} else if !self.context.is_outbound() && self.context.funding_tx_confirmed_in.is_none() &&
				height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
			log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id);
			// If funding_tx_confirmed_in is unset, the channel must not be active
			assert!(self.context.channel_state <= ChannelState::ChannelReady(ChannelReadyFlags::new()));
			assert!(!self.context.channel_state.is_our_channel_ready());
			return Err(ClosureReason::FundingTimedOut);
		}

		let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
			self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
		} else { None };
		Ok((None, timed_out_htlcs, announcement_sigs))
	}
	/// Indicates the funding transaction is no longer confirmed in the main chain. This may
	/// force-close the channel, but may also indicate a harmless reorganization of a block or two
	/// before the channel has reached channel_ready and we can just wait for more blocks.
	pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
		if self.context.funding_tx_confirmation_height != 0 {
			// We handle the funding disconnection by calling best_block_updated with a height one
			// below where our funding was connected, implying a reorg back to conf_height - 1.
			let reorg_height = self.context.funding_tx_confirmation_height - 1;
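			// Replaying the block update at conf_height - 1 makes the confirmation count
			// (height - funding_tx_confirmation_height + 1) come out to zero, which drives the
			// unconfirmed-funding handling in `do_best_block_updated`.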
			// We use the time field to bump the current time we set on channel updates if it's
			// larger. If we don't know that time has moved forward, we can just set it to the last
			// time we saw and it will be ignored.
			let best_time = self.context.update_time_counter;
			match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&dyn NodeSigner, &UserConfig)>, logger) {
				Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
					assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?");
					assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
					assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?");
					Ok(())
				},
				Err(e) => Err(e)
			}
		} else {
			// We never learned about the funding confirmation anyway, just ignore
			Ok(())
		}
	}
	// Methods to get unprompted messages to send to the remote end (or where we already returned
	// something in the handler for the message that prompted this message):

	/// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
	/// announceable and available for use (have exchanged [`ChannelReady`] messages in both
	/// directions). Should be used for both broadcasted announcements and in response to an
	/// AnnouncementSignatures message from the remote peer.
	///
	/// Will only fail if we're not in a state where channel_announcement may be sent (including
	/// closing).
	///
	/// This will only return ChannelError::Ignore upon failure.
	///
	/// [`ChannelReady`]: crate::ln::msgs::ChannelReady
	fn get_channel_announcement<NS: Deref>(
		&self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
	) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
		if !self.context.config.announced_channel {
			return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
		}
		if !self.context.is_usable() {
			return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
		}

		let short_channel_id = self.context.get_short_channel_id()
			.ok_or(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel has not been confirmed yet".to_owned()))?;
		let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
			.map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
		let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
		let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();

		let msg = msgs::UnsignedChannelAnnouncement {
			features: channelmanager::provided_channel_features(&user_config),
			chain_hash,
			short_channel_id,
			node_id_1: if were_node_one { node_id } else { counterparty_node_id },
			node_id_2: if were_node_one { counterparty_node_id } else { node_id },
			bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }),
			bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }),
			excess_data: Vec::new(),
		};

		Ok(msg)
	}
	fn get_announcement_sigs<NS: Deref, L: Deref>(
		&mut self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
		best_block_height: u32, logger: &L
	) -> Option<msgs::AnnouncementSignatures>
	where
		NS::Target: NodeSigner,
		L::Target: Logger
	{
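		// announcement_signatures requires six confirmations on the funding transaction: at
		// height funding_tx_confirmation_height + 5 the funding tx has exactly six confirmations,
		// so anything later is acceptable.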
		if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
			return None;
		}

		if !self.context.is_usable() {
			return None;
		}

		if self.context.channel_state.is_peer_disconnected() {
			log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
			return None;
		}

		if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent {
			return None;
		}

		log_trace!(logger, "Creating an announcement_signatures message for channel {}", &self.context.channel_id());
		let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
			Ok(a) => a,
			Err(e) => {
				log_trace!(logger, "{:?}", e);
				return None;
			}
		};
		let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) {
			Err(_) => {
				log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!");
				return None;
			},
			Ok(v) => v
		};
		match &self.context.holder_signer {
			ChannelSignerType::Ecdsa(ecdsa) => {
				let our_bitcoin_sig = match ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) {
					Err(_) => {
						log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!");
						return None;
					},
					Ok(v) => v
				};
				let short_channel_id = match self.context.get_short_channel_id() {
					Some(scid) => scid,
					None => return None,
				};

				self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;

				Some(msgs::AnnouncementSignatures {
					channel_id: self.context.channel_id(),
					short_channel_id,
					node_signature: our_node_sig,
					bitcoin_signature: our_bitcoin_sig,
				})
			},
			// TODO (taproot|arik)
			#[cfg(taproot)]
			_ => todo!()
		}
	}
	/// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are
	/// available.
	fn sign_channel_announcement<NS: Deref>(
		&self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement
	) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
		if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs {
			let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
				.map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?);
			let were_node_one = announcement.node_id_1 == our_node_key;

			let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement))
				.map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?;
			match &self.context.holder_signer {
				ChannelSignerType::Ecdsa(ecdsa) => {
					let our_bitcoin_sig = ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx)
						.map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
					Ok(msgs::ChannelAnnouncement {
						node_signature_1: if were_node_one { our_node_sig } else { their_node_sig },
						node_signature_2: if were_node_one { their_node_sig } else { our_node_sig },
						bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig },
						bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
						contents: announcement,
					})
				},
				// TODO (taproot|arik)
				#[cfg(taproot)]
				_ => todo!()
			}
		} else {
			Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
		}
	}
	/// Processes an incoming announcement_signatures message, providing a fully-signed
	/// channel_announcement message which we can broadcast and storing our counterparty's
	/// signatures for later reconstruction/rebroadcast of the channel_announcement.
	pub fn announcement_signatures<NS: Deref>(
		&mut self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32,
		msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
	) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
		let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;

		let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);

		if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
			return Err(ChannelError::Close(format!(
				"Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
				&announcement, self.context.get_counterparty_node_id())));
		}
		if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
			return Err(ChannelError::Close(format!(
				"Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
				&announcement, self.context.counterparty_funding_pubkey())));
		}

		self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature));
		if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
			return Err(ChannelError::Ignore(
				"Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned()));
		}

		self.sign_channel_announcement(node_signer, announcement)
	}
	/// Gets a signed channel_announcement for this channel, if we previously received an
	/// announcement_signatures from our counterparty.
	pub fn get_signed_channel_announcement<NS: Deref>(
		&self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32, user_config: &UserConfig
	) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
		if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
			return None;
		}
		let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
			Ok(res) => res,
			Err(_) => return None,
		};
		match self.sign_channel_announcement(node_signer, announcement) {
			Ok(res) => Some(res),
			Err(_) => None,
		}
	}
	/// May panic if called on a channel that wasn't immediately-previously
	/// self.remove_uncommitted_htlcs_and_mark_paused()'d
	pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
		assert!(self.context.channel_state.is_peer_disconnected());
		assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
		// Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
		// current to_remote balances. However, it no longer has any use, and thus is now simply
		// set to a dummy (but valid, as required by the spec) public key.
		// fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey"
		// branches, but we unwrap it below, so we arbitrarily select a dummy pubkey which is both
		// valid, and valid in fuzzing mode's arbitrary validity criteria:
		let mut pk = [2; 33]; pk[1] = 0xff;
		let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
		let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
			let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
			log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), &self.context.channel_id());
			remote_last_secret
		} else {
			log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", &self.context.channel_id());
			[0; 32]
		};
		self.mark_awaiting_response();
		msgs::ChannelReestablish {
			channel_id: self.context.channel_id(),
			// The protocol has two different commitment number concepts - the "commitment
			// transaction number", which starts from 0 and counts up, and the "revocation key
			// index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
			// commitment transaction numbers by the index which will be used to reveal the
			// revocation key for that commitment transaction, which means we have to convert them
			// to protocol-level commitment numbers here...

			// next_local_commitment_number is the next commitment_signed number we expect to
			// receive (indicating if they need to resend one that we missed).
			next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
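			// For example, immediately after the initial funding exchange,
			// cur_holder_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER - 1, so we
			// send next_local_commitment_number = 1: commitment number 0 was exchanged at
			// funding, and commitment_signed number 1 is the next we expect.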
			// We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
			// receive, however we track it by the next commitment number for a remote transaction
			// (which is one further, as they always revoke previous commitment transaction, not
			// the one we send) so we have to decrement by 1. Note that if
			// cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
			// dropped this channel on disconnect as it hasn't yet reached AwaitingChannelReady so we can't
			// overflow here.
			next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
			your_last_per_commitment_secret: remote_last_secret,
			my_current_per_commitment_point: dummy_pubkey,
			// TODO(dual_funding): If we've sent `commitment_signed` for an interactive transaction
			// construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
			// txid of that interactive transaction, else we MUST NOT set it.
			next_funding_txid: None,
		}
	}
	// Send stuff to our remote peers:

	/// Queues up an outbound HTLC to send by placing it in the holding cell. You should call
	/// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
	/// commitment update.
	///
	/// `Err`s will only be [`ChannelError::Ignore`].
	pub fn queue_add_htlc<F: Deref, L: Deref>(
		&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
		onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
		blinding_point: Option<PublicKey>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
	) -> Result<(), ChannelError>
	where F::Target: FeeEstimator, L::Target: Logger
	{
		self
			.send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true,
				skimmed_fee_msat, blinding_point, fee_estimator, logger)
			.map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
			.map_err(|err| {
				if let ChannelError::Ignore(_) = err { /* fine */ }
				else { debug_assert!(false, "Queueing cannot trigger channel failure"); }
				err
			})
	}
	/// Adds a pending outbound HTLC to this channel. Note that you probably want
	/// [`Self::send_htlc_and_commit`] instead, as you'll usually want both messages at once.
	///
	/// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on
	/// the wire:
	/// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
	///   wouldn't be able to determine what they actually ACK'ed if we have two sets of updates
	///   in flight.
	/// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as
	///   we may not yet have sent the previous commitment update messages and will need to
	///   regenerate them.
	///
	/// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods
	/// on this [`Channel`] if `force_holding_cell` is false.
	///
	/// `Err`s will only be [`ChannelError::Ignore`].
	fn send_htlc<F: Deref, L: Deref>(
		&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
		onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool,
		skimmed_fee_msat: Option<u64>, blinding_point: Option<PublicKey>,
		fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
	) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
	where F::Target: FeeEstimator, L::Target: Logger
	{
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
			self.context.channel_state.is_local_shutdown_sent() ||
			self.context.channel_state.is_remote_shutdown_sent()
		{
			return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
		}
		let channel_total_msat = self.context.channel_value_satoshis * 1000;
		if amount_msat > channel_total_msat {
			return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat)));
		}

		if amount_msat == 0 {
			return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
		}

		let available_balances = self.context.get_available_balances(fee_estimator);
		if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
			return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
				available_balances.next_outbound_htlc_minimum_msat)));
		}

		if amount_msat > available_balances.next_outbound_htlc_limit_msat {
			return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
				available_balances.next_outbound_htlc_limit_msat)));
		}

		if self.context.channel_state.is_peer_disconnected() {
			// Note that this should never really happen: if we're !is_live() on receipt of an
			// incoming HTLC for relay we will reject the HTLC, and we won't allow the user to
			// send directly into a !is_live() channel. However, if we disconnected during the
			// time the previous hop was doing the commitment dance we may end up getting here
			// after the forwarding delay. In any case, returning an IgnoreError will get
			// ChannelManager to do the right thing and fail backwards now.
			return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
		}

		let need_holding_cell = self.context.channel_state.should_force_holding_cell();
		log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}",
			payment_hash, amount_msat,
			if force_holding_cell { "into holding cell" }
			else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
			else { "to peer" });

		if need_holding_cell {
			force_holding_cell = true;
		}

		// Now update local state:
		if force_holding_cell {
			self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
				amount_msat,
				payment_hash,
				cltv_expiry,
				source,
				onion_routing_packet,
				skimmed_fee_msat,
				blinding_point,
			});
			return Ok(None);
		}

		self.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
			htlc_id: self.context.next_holder_htlc_id,
			amount_msat,
			payment_hash: payment_hash.clone(),
			cltv_expiry,
			state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
			source,
			blinding_point,
			skimmed_fee_msat,
		});

		let res = msgs::UpdateAddHTLC {
			channel_id: self.context.channel_id,
			htlc_id: self.context.next_holder_htlc_id,
			amount_msat,
			payment_hash,
			cltv_expiry,
			onion_routing_packet,
			skimmed_fee_msat,
			blinding_point,
		};
		self.context.next_holder_htlc_id += 1;

		Ok(Some(res))
	}
	fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
		log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
		// We can upgrade the status of some HTLCs that are waiting on a commitment, even if we
		// fail to generate this, we still are at least at a position where upgrading their status
		// is acceptable.
		for htlc in self.context.pending_inbound_htlcs.iter_mut() {
			let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state {
				Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
			} else { None };
			if let Some(state) = new_state {
				log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
				htlc.state = state;
			}
		}
		for htlc in self.context.pending_outbound_htlcs.iter_mut() {
			if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
				log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
				// Grab the preimage, if it exists, instead of cloning
				let mut reason = OutboundHTLCOutcome::Success(None);
				mem::swap(outcome, &mut reason);
				htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
			}
		}
		if let Some((feerate, update_state)) = self.context.pending_update_fee {
			if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
				debug_assert!(!self.context.is_outbound());
				log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
				self.context.feerate_per_kw = feerate;
				self.context.pending_update_fee = None;
			}
		}
		self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst;

		let (mut htlcs_ref, counterparty_commitment_tx) =
			self.build_commitment_no_state_update(logger);
		let counterparty_commitment_txid = counterparty_commitment_tx.trust().txid();
		let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
			htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();

		if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent {
			self.context.announcement_sigs_state = AnnouncementSigsState::Committed;
		}

		self.context.latest_monitor_update_id += 1;
		let monitor_update = ChannelMonitorUpdate {
			update_id: self.context.latest_monitor_update_id,
			counterparty_node_id: Some(self.context.counterparty_node_id),
			updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
				commitment_txid: counterparty_commitment_txid,
				htlc_outputs: htlcs.clone(),
				commitment_number: self.context.cur_counterparty_commitment_transaction_number,
				their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap(),
				feerate_per_kw: Some(counterparty_commitment_tx.feerate_per_kw()),
				to_broadcaster_value_sat: Some(counterparty_commitment_tx.to_broadcaster_value_sat()),
				to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()),
			}],
		};
		self.context.channel_state.set_awaiting_remote_revoke();
		monitor_update
	}
	fn build_commitment_no_state_update<L: Deref>(&self, logger: &L)
	-> (Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>, CommitmentTransaction)
	where L::Target: Logger
	{
		let counterparty_keys = self.context.build_remote_transaction_keys();
		let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
		let counterparty_commitment_tx = commitment_stats.tx;

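		// Sanity check (test/fuzzing only): when we accepted or forwarded the HTLCs we cached a
		// projection of the next remote commitment tx fee; now that we're actually building that
		// commitment, assert the projection matched reality.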
		#[cfg(any(test, fuzzing))]
		{
			if !self.context.is_outbound() {
				let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
				*self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
				if let Some(info) = projected_commit_tx_info {
					let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len();
					if info.total_pending_htlcs == total_pending_htlcs
						&& info.next_holder_htlc_id == self.context.next_holder_htlc_id
						&& info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
						&& info.feerate == self.context.feerate_per_kw {
						let actual_fee = commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.get_channel_type());
						assert_eq!(actual_fee, info.fee);
					}
				}
			}
		}

		(commitment_stats.htlcs_included, counterparty_commitment_tx)
	}
	/// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
	/// generation when we shouldn't change HTLC/channel state.
	fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
		// Get the fee tests from `build_commitment_no_state_update`
		#[cfg(any(test, fuzzing))]
		self.build_commitment_no_state_update(logger);

		let counterparty_keys = self.context.build_remote_transaction_keys();
		let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
		let counterparty_commitment_txid = commitment_stats.tx.trust().txid();

		match &self.context.holder_signer {
			ChannelSignerType::Ecdsa(ecdsa) => {
				let (signature, htlc_signatures);

				{
					let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
					for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
						htlcs.push(htlc);
					}

					let res = ecdsa.sign_counterparty_commitment(
							&commitment_stats.tx,
							commitment_stats.inbound_htlc_preimages,
							commitment_stats.outbound_htlc_preimages,
							&self.context.secp_ctx,
						).map_err(|_| ChannelError::Ignore("Failed to get signatures for new commitment_signed".to_owned()))?;
					signature = res.0;
					htlc_signatures = res.1;

					log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
						encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
						&counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
						log_bytes!(signature.serialize_compact()[..]), &self.context.channel_id());

					for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
						log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
							encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, &self.context.channel_type, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
							encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &counterparty_keys)),
							log_bytes!(counterparty_keys.broadcaster_htlc_key.to_public_key().serialize()),
							log_bytes!(htlc_sig.serialize_compact()[..]), &self.context.channel_id());
					}
				}

				Ok((msgs::CommitmentSigned {
					channel_id: self.context.channel_id,
					signature,
					htlc_signatures,
					#[cfg(taproot)]
					partial_signature_with_nonce: None,
				}, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
			},
			// TODO (taproot|arik)
			#[cfg(taproot)]
			_ => todo!()
		}
	}
	/// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
	/// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
	///
	/// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
	/// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
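	///
	/// A minimal usage sketch (the `chan`, `fee_estimator`, and `logger` bindings, plus the
	/// prepared `payment_hash`, `source`, and `onion` values, are assumptions for illustration):
	///
	/// ```ignore
	/// let monitor_update = chan.send_htlc_and_commit(
	/// 	10_000,        // amount_msat
	/// 	payment_hash,  // PaymentHash the HTLC is locked to
	/// 	144,           // cltv_expiry
	/// 	source,        // HTLCSource for failure/claim bookkeeping
	/// 	onion,         // onion_routing_packet
	/// 	None,          // skimmed_fee_msat
	/// 	&fee_estimator, &logger,
	/// )?;
	/// // `Some(update)` must be persisted before the resulting commitment_signed may be sent
	/// // to the peer; `None` means the HTLC was only placed in the holding cell.
	/// ```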
	pub fn send_htlc_and_commit<F: Deref, L: Deref>(
		&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32,
		source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
		fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
	) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
	where F::Target: FeeEstimator, L::Target: Logger
	{
		let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
			onion_routing_packet, false, skimmed_fee_msat, None, fee_estimator, logger);
		if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
		match send_res? {
			Some(_) => {
				let monitor_update = self.build_commitment_no_status_check(logger);
				self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
				Ok(self.push_ret_blockable_mon_update(monitor_update))
			},
			None => Ok(None)
		}
	}
	/// Applies the `ChannelUpdate` and returns a boolean indicating whether a change actually
	/// happened.
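	///
	/// A short usage sketch (hypothetical `chan` binding and a received `msg`; callers would
	/// typically refresh anything derived from the counterparty's forwarding parameters when
	/// this returns `Ok(true)`):
	///
	/// ```ignore
	/// if chan.channel_update(&msg)? {
	/// 	// The counterparty's forwarding fees/CLTV delta changed; update cached values.
	/// }
	/// ```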
	pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<bool, ChannelError> {
		let new_forwarding_info = Some(CounterpartyForwardingInfo {
			fee_base_msat: msg.contents.fee_base_msat,
			fee_proportional_millionths: msg.contents.fee_proportional_millionths,
			cltv_expiry_delta: msg.contents.cltv_expiry_delta
		});
		let did_change = self.context.counterparty_forwarding_info != new_forwarding_info;
		if did_change {
			self.context.counterparty_forwarding_info = new_forwarding_info;
		}

		Ok(did_change)
	}
	/// Begins the shutdown process, getting a message for the remote peer and returning all
	/// holding cell HTLCs for payment failure.
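	///
	/// A cooperative-close sketch (the `chan`, `signer_provider`, and `their_features` bindings
	/// are assumed; error handling elided):
	///
	/// ```ignore
	/// let (shutdown_msg, monitor_update_opt, dropped_htlcs) =
	/// 	chan.get_shutdown(&signer_provider, &their_features, None, None)?;
	/// // Send `shutdown_msg` to the peer, persist `monitor_update_opt` if present, and fail
	/// // each `(HTLCSource, PaymentHash)` in `dropped_htlcs` back upstream.
	/// ```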
	pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures,
		target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
	-> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
	{
		for htlc in self.context.pending_outbound_htlcs.iter() {
			if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
				return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
			}
		}
		if self.context.channel_state.is_local_shutdown_sent() {
			return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
		}
		else if self.context.channel_state.is_remote_shutdown_sent() {
			return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
		}
		if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
			return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
		}
		assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
		if self.context.channel_state.is_peer_disconnected() || self.context.channel_state.is_monitor_update_in_progress() {
			return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
		}

		let update_shutdown_script = match self.context.shutdown_scriptpubkey {
			Some(_) => false,
			None => {
				// use override shutdown script if provided
				let shutdown_scriptpubkey = match override_shutdown_script {
					Some(script) => script,
					None => {
						// otherwise, use the shutdown scriptpubkey provided by the signer
						match signer_provider.get_shutdown_scriptpubkey() {
							Ok(scriptpubkey) => scriptpubkey,
							Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
						}
					},
				};
				if !shutdown_scriptpubkey.is_compatible(their_features) {
					return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
				}
				self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
				true
			},
		};

		// From here on out, we may not fail!
		self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
		self.context.channel_state.set_local_shutdown_sent();
		self.context.update_time_counter += 1;

		let monitor_update = if update_shutdown_script {
			self.context.latest_monitor_update_id += 1;
			let monitor_update = ChannelMonitorUpdate {
				update_id: self.context.latest_monitor_update_id,
				counterparty_node_id: Some(self.context.counterparty_node_id),
				updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
					scriptpubkey: self.get_closing_scriptpubkey(),
				}],
			};
			self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
			self.push_ret_blockable_mon_update(monitor_update)
		} else { None };
		let shutdown = msgs::Shutdown {
			channel_id: self.context.channel_id,
			scriptpubkey: self.get_closing_scriptpubkey(),
		};

		// Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
		// our shutdown until we've committed all of the pending changes.
		self.context.holding_cell_update_fee = None;
		let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
		self.context.holding_cell_htlc_updates.retain(|htlc_update| {
			match htlc_update {
				&HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
					dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
					false
				},
				_ => true
			}
		});

		debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
			"we can't both complete shutdown and return a monitor update");

		Ok((shutdown, monitor_update, dropped_outbound_htlcs))
	}
	pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
		self.context.holding_cell_htlc_updates.iter()
			.flat_map(|htlc_update| {
				match htlc_update {
					HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
						=> Some((source, payment_hash)),
					_ => None,
				}
			})
			.chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
	}
}

/// A not-yet-funded outbound (from holder) channel using V1 channel establishment.
pub(super) struct OutboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
	pub context: ChannelContext<SP>,
	pub unfunded_context: UnfundedChannelContext,
}
impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
	pub fn new<ES: Deref, F: Deref>(
		fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
		channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
		outbound_scid_alias: u64, temporary_channel_id: Option<ChannelId>
	) -> Result<OutboundV1Channel<SP>, APIError>
	where ES::Target: EntropySource,
	      F::Target: FeeEstimator
	{
		let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
		let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
		let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
		let pubkeys = holder_signer.pubkeys().clone();

		if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
			return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
		}
		if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
			return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
		}
		let channel_value_msat = channel_value_satoshis * 1000;
		if push_msat > channel_value_msat {
			return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
		}
		if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
			return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk", holder_selected_contest_delay)});
		}
		let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
		if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
			// Protocol level safety check in place, although it should never happen because
			// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
			return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below implementation limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
		}

		let channel_type = Self::get_initial_channel_type(&config, their_features);
		debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));

		let (commitment_conf_target, anchor_outputs_value_msat) = if channel_type.supports_anchors_zero_fee_htlc_tx() {
			(ConfirmationTarget::AnchorChannelFee, ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000)
		} else {
			(ConfirmationTarget::NonAnchorChannelFee, 0)
		};
		let commitment_feerate = fee_estimator.bounded_sat_per_1000_weight(commitment_conf_target);

		let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
		let commitment_tx_fee = commit_tx_fee_msat(commitment_feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
		if value_to_self_msat.saturating_sub(anchor_outputs_value_msat) < commitment_tx_fee {
			return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
		}

		let mut secp_ctx = Secp256k1::new();
		secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());

		let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
			match signer_provider.get_shutdown_scriptpubkey() {
				Ok(scriptpubkey) => Some(scriptpubkey),
				Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
			}
		} else { None };

		if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
			if !shutdown_scriptpubkey.is_compatible(&their_features) {
				return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
			}
		}

		let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
			Ok(script) => script,
			Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
		};

		let temporary_channel_id = temporary_channel_id.unwrap_or_else(|| ChannelId::temporary_from_entropy_source(entropy_source));

		Ok(Self {
			context: ChannelContext {
				user_id,

				config: LegacyChannelConfig {
					options: config.channel_config.clone(),
					announced_channel: config.channel_handshake_config.announced_channel,
					commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
				},

				prev_config: None,

				inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),

				channel_id: temporary_channel_id,
				temporary_channel_id: Some(temporary_channel_id),
				channel_state: ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT),
				announcement_sigs_state: AnnouncementSigsState::NotSent,
				secp_ctx,
				channel_value_satoshis,

				latest_monitor_update_id: 0,

				holder_signer: ChannelSignerType::Ecdsa(holder_signer),
				shutdown_scriptpubkey,
				destination_script,

				cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
				cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
				value_to_self_msat,

				pending_inbound_htlcs: Vec::new(),
				pending_outbound_htlcs: Vec::new(),
				holding_cell_htlc_updates: Vec::new(),
				pending_update_fee: None,
				holding_cell_update_fee: None,
				next_holder_htlc_id: 0,
				next_counterparty_htlc_id: 0,
				update_time_counter: 1,

				resend_order: RAACommitmentOrder::CommitmentFirst,

				monitor_pending_channel_ready: false,
				monitor_pending_revoke_and_ack: false,
				monitor_pending_commitment_signed: false,
				monitor_pending_forwards: Vec::new(),
				monitor_pending_failures: Vec::new(),
				monitor_pending_finalized_fulfills: Vec::new(),

				signer_pending_commitment_update: false,
				signer_pending_funding: false,

				#[cfg(debug_assertions)]
				holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
				#[cfg(debug_assertions)]
				counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),

				last_sent_closing_fee: None,
				pending_counterparty_closing_signed: None,
				expecting_peer_commitment_signed: false,
				closing_fee_limits: None,
				target_closing_feerate_sats_per_kw: None,

				funding_tx_confirmed_in: None,
				funding_tx_confirmation_height: 0,
				short_channel_id: None,
				channel_creation_height: current_chain_height,

				feerate_per_kw: commitment_feerate,
				counterparty_dust_limit_satoshis: 0,
				holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
				counterparty_max_htlc_value_in_flight_msat: 0,
				holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
				counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel
				holder_selected_channel_reserve_satoshis,
				counterparty_htlc_minimum_msat: 0,
				holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
				counterparty_max_accepted_htlcs: 0,
				holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
				minimum_depth: None, // Filled in in accept_channel

				counterparty_forwarding_info: None,

				channel_transaction_parameters: ChannelTransactionParameters {
					holder_pubkeys: pubkeys,
					holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
					is_outbound_from_holder: true,
					counterparty_parameters: None,
					funding_outpoint: None,
					channel_type_features: channel_type.clone()
				},
				funding_transaction: None,
				is_batch_funding: None,

				counterparty_cur_commitment_point: None,
				counterparty_prev_commitment_point: None,
				counterparty_node_id,

				counterparty_shutdown_scriptpubkey: None,

				commitment_secrets: CounterpartyCommitmentSecrets::new(),

				channel_update_status: ChannelUpdateStatus::Enabled,
				closing_signed_in_flight: false,

				announcement_sigs: None,

				#[cfg(any(test, fuzzing))]
				next_local_commitment_tx_fee_info_cached: Mutex::new(None),
				#[cfg(any(test, fuzzing))]
				next_remote_commitment_tx_fee_info_cached: Mutex::new(None),

				workaround_lnd_bug_4006: None,
				sent_message_awaiting_response: None,

				latest_inbound_scid_alias: None,
				outbound_scid_alias,

				channel_pending_event_emitted: false,
				channel_ready_event_emitted: false,

				#[cfg(any(test, fuzzing))]
				historical_inbound_htlc_fulfills: HashSet::new(),

				channel_type,
				channel_keys_id,

				blocked_monitor_updates: Vec::new(),
			},
			unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
		})
	}
	/// Only allowed after [`ChannelContext::channel_transaction_parameters`] is set.
	fn get_funding_created_msg<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
		let counterparty_keys = self.context.build_remote_transaction_keys();
		let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
		let signature = match &self.context.holder_signer {
			// TODO (taproot|arik): move match into calling method for Taproot
			ChannelSignerType::Ecdsa(ecdsa) => {
				ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.context.secp_ctx)
					.map(|(sig, _)| sig).ok()?
			},
			// TODO (taproot|arik)
			#[cfg(taproot)]
			ChannelSignerType::Taproot(_) => todo!()
		};

		if self.context.signer_pending_funding {
			log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding");
			self.context.signer_pending_funding = false;
		}

		Some(msgs::FundingCreated {
			temporary_channel_id: self.context.temporary_channel_id.unwrap(),
			funding_txid: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().txid,
			funding_output_index: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().index,
			signature,
			#[cfg(taproot)]
			partial_signature_with_nonce: None,
			#[cfg(taproot)]
			next_local_nonce: None,
		})
	}
	/// Updates channel state with knowledge of the funding transaction's txid/index, and generates
	/// a funding_created message for the remote peer.
	/// Panics if called at some time other than immediately after initial handshake, if called twice,
	/// or if called on an inbound channel.
	/// Note that channel_id changes during this call!
	/// Do NOT broadcast the funding transaction until after a successful funding_signed call!
	/// If an Err is returned, it is a ChannelError::Close.
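	///
	/// A funding-flow sketch (hypothetical `chan`, `funding_tx`, and `logger` bindings; the
	/// funding output is assumed to be output 0 of the funding transaction):
	///
	/// ```ignore
	/// let funding_txo = OutPoint { txid: funding_tx.txid(), index: 0 };
	/// if let Some(funding_created) = chan
	/// 	.get_funding_created(funding_tx, funding_txo, false, &logger)
	/// 	.map_err(|(_chan, e)| e)?
	/// {
	/// 	// Send `funding_created` to the peer; only broadcast the funding transaction once
	/// 	// the peer's funding_signed has been handled successfully.
	/// }
	/// ```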
	pub fn get_funding_created<L: Deref>(&mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
	-> Result<Option<msgs::FundingCreated>, (Self, ChannelError)> where L::Target: Logger {
		if !self.context.is_outbound() {
			panic!("Tried to create outbound funding_created message on an inbound channel!");
		}
		if !matches!(
			self.context.channel_state, ChannelState::NegotiatingFunding(flags)
			if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
		) {
			panic!("Tried to get a funding_created message at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
		}
		if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
				self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
				self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
			panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
		}

		self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
		self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);

		// Now that we're past error-generating stuff, update our local state:

		self.context.channel_state = ChannelState::FundingNegotiated;
		self.context.channel_id = funding_txo.to_channel_id();

		// If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
		// We can skip this if it is a zero-conf channel.
		if funding_transaction.is_coin_base() &&
				self.context.minimum_depth.unwrap_or(0) > 0 &&
				self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
			self.context.minimum_depth = Some(COINBASE_MATURITY);
		}

		self.context.funding_transaction = Some(funding_transaction);
		self.context.is_batch_funding = Some(()).filter(|_| is_batch_funding);

		let funding_created = self.get_funding_created_msg(logger);
		if funding_created.is_none() {
			#[cfg(not(async_signing))] {
				panic!("Failed to get signature for new funding creation");
			}
			#[cfg(async_signing)] {
				if !self.context.signer_pending_funding {
					log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
					self.context.signer_pending_funding = true;
				}
			}
		}

		Ok(funding_created)
	}
	fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
		// The default channel type (ie the first one we try) depends on whether the channel is
		// public - if it is, we just go with `only_static_remotekey` as it's the only option
		// available. If it's private, we first try `scid_privacy` as it provides better privacy
		// with no other changes, and fall back to `only_static_remotekey`.
		let mut ret = ChannelTypeFeatures::only_static_remote_key();
		if !config.channel_handshake_config.announced_channel &&
			config.channel_handshake_config.negotiate_scid_privacy &&
			their_features.supports_scid_privacy() {
			ret.set_scid_privacy_required();
		}

		// Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
		// set it now. If they don't understand it, we'll fall back to our default of
		// `only_static_remotekey`.
		if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
			their_features.supports_anchors_zero_fee_htlc_tx() {
			ret.set_anchors_zero_fee_htlc_tx_required();
		}

		ret
	}
	/// If we receive an error message, it may only be a rejection of the channel type we tried,
	/// not of our ability to open any channel at all. Thus, on error, we should first call this
	/// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
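	///
	/// A retry-loop sketch (hypothetical `chan`, `chain_hash`, and `fee_estimator` bindings; the
	/// peer's error message for our previous open attempt has just been received):
	///
	/// ```ignore
	/// match chan.maybe_handle_error_without_close(chain_hash, &fee_estimator) {
	/// 	Ok(open_channel_msg) => {
	/// 		// A less-featureful channel type was selected; re-send open_channel.
	/// 	},
	/// 	Err(()) => {
	/// 		// Nothing left to downgrade to; fail the channel.
	/// 	},
	/// }
	/// ```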
	pub(crate) fn maybe_handle_error_without_close<F: Deref>(
		&mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F>
	) -> Result<msgs::OpenChannel, ()>
	where
		F::Target: FeeEstimator
	{
		if !self.context.is_outbound() ||
			!matches!(
				self.context.channel_state, ChannelState::NegotiatingFunding(flags)
				if flags == NegotiatingFundingFlags::OUR_INIT_SENT
			)
		{
			return Err(());
		}
		if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() {
			// We've exhausted our options
			return Err(());
		}
		// We support opening a few different types of channels. Try removing our additional
		// features one by one until we've either arrived at our default or the counterparty has
		// accepted the channel.
		//
		// Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
		// counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
		// checks whether the counterparty supports every feature, this would only happen if the
		// counterparty is advertising the feature, but rejecting channels proposing the feature for
		// some reason.
		if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
			self.context.channel_type.clear_anchors_zero_fee_htlc_tx();
			self.context.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
			assert!(!self.context.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
		} else if self.context.channel_type.supports_scid_privacy() {
			self.context.channel_type.clear_scid_privacy();
		} else {
			self.context.channel_type = ChannelTypeFeatures::only_static_remote_key();
		}
		self.context.channel_transaction_parameters.channel_type_features = self.context.channel_type.clone();
		Ok(self.get_open_channel(chain_hash))
	}
	pub fn get_open_channel(&self, chain_hash: ChainHash) -> msgs::OpenChannel {
		if !self.context.is_outbound() {
			panic!("Tried to open a channel for an inbound channel?");
		}
		if self.context.have_received_message() {
			panic!("Cannot generate an open_channel after we've moved forward");
		}

		if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
			panic!("Tried to send an open_channel for a channel that has already advanced");
		}

		let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
		let keys = self.context.get_holder_pubkeys();

		msgs::OpenChannel {
			chain_hash,
			temporary_channel_id: self.context.channel_id,
			funding_satoshis: self.context.channel_value_satoshis,
			push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat,
			dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
			max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
			channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
			htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
			feerate_per_kw: self.context.feerate_per_kw as u32,
			to_self_delay: self.context.get_holder_selected_contest_delay(),
			max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
			funding_pubkey: keys.funding_pubkey,
			revocation_basepoint: keys.revocation_basepoint.to_public_key(),
			payment_point: keys.payment_point,
			delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
			htlc_basepoint: keys.htlc_basepoint.to_public_key(),
			first_per_commitment_point,
			channel_flags: if self.context.config.announced_channel {1} else {0},
			shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
				Some(script) => script.clone().into_inner(),
				None => Builder::new().into_script(),
			}),
			channel_type: Some(self.context.channel_type.clone()),
		}
	}
	pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
		let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };

		// Check sanity of message fields:
		if !self.context.is_outbound() {
			return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
		}
		if !matches!(self.context.channel_state, ChannelState::NegotiatingFunding(flags) if flags == NegotiatingFundingFlags::OUR_INIT_SENT) {
			return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
		}
		if msg.dust_limit_satoshis > 21000000 * 100000000 {
			return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.dust_limit_satoshis)));
		}
		if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
			return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
		}
		if msg.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
			return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
		}
		if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
			return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
				msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
		}
		let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
		if msg.htlc_minimum_msat >= full_channel_value_msat {
			return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
		}
		let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
		if msg.to_self_delay > max_delay_acceptable {
			return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.to_self_delay)));
		}
		if msg.max_accepted_htlcs < 1 {
			return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
		}
		if msg.max_accepted_htlcs > MAX_HTLCS {
			return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
		}

		// Now check against optional parameters as set by config...
		if msg.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
			return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
		}
		if msg.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
			return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
		}
		if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
			return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
		}
		if msg.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
			return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
		}
		if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
			return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
		}
		if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
			return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
		}
		if msg.minimum_depth > peer_limits.max_minimum_depth {
			return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.minimum_depth)));
		}

		if let Some(ty) = &msg.channel_type {
			if *ty != self.context.channel_type {
				return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
			}
		} else if their_features.supports_channel_type() {
			// Assume they've accepted the channel type as they said they understand it.
		} else {
			let channel_type = ChannelTypeFeatures::from_init(&their_features);
			if channel_type != ChannelTypeFeatures::only_static_remote_key() {
				return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
			}
			self.context.channel_type = channel_type.clone();
			self.context.channel_transaction_parameters.channel_type_features = channel_type;
		}

		let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
			match &msg.shutdown_scriptpubkey {
				&Some(ref script) => {
					// Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
					if script.len() == 0 {
						None
					} else {
						if !script::is_bolt2_compliant(&script, their_features) {
							return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
						}
						Some(script.clone())
					}
				},
				// Peer is signaling upfront shutdown but didn't opt out via the correct mechanism (a 0-length script). Peer looks buggy, we fail the channel
				&None => {
					return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
				}
			}
		} else { None };

		self.context.counterparty_dust_limit_satoshis = msg.dust_limit_satoshis;
		self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
		self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
		self.context.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat;
		self.context.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs;

		if peer_limits.trust_own_funding_0conf {
			self.context.minimum_depth = Some(msg.minimum_depth);
		} else {
			self.context.minimum_depth = Some(cmp::max(1, msg.minimum_depth));
		}

		let counterparty_pubkeys = ChannelPublicKeys {
			funding_pubkey: msg.funding_pubkey,
			revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
			payment_point: msg.payment_point,
			delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
			htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
		};

		self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
			selected_contest_delay: msg.to_self_delay,
			pubkeys: counterparty_pubkeys,
		});

		self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point);
		self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;

		self.context.channel_state = ChannelState::NegotiatingFunding(
			NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
		);
		self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.

		Ok(())
	}
	/// Handles a funding_signed message from the remote end.
	/// If this call is successful, broadcast the funding transaction (and not before!)
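	///
	/// A sketch of the happy path (hypothetical `outbound_chan`, `msg`, `best_block`,
	/// `signer_provider`, `logger`, `broadcaster`, and `funding_tx` bindings):
	///
	/// ```ignore
	/// let (chan, channel_monitor) = outbound_chan
	/// 	.funding_signed(&msg, best_block, &signer_provider, &logger)
	/// 	.map_err(|(_chan, e)| e)?;
	/// // Persist `channel_monitor` first; only then is it safe to broadcast the funding tx.
	/// broadcaster.broadcast_transactions(&[&funding_tx]);
	/// ```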
	pub fn funding_signed<L: Deref>(
		mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
	) -> Result<(Channel<SP>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (OutboundV1Channel<SP>, ChannelError)>
	where
		L::Target: Logger
	{
		if !self.context.is_outbound() {
			return Err((self, ChannelError::Close("Received funding_signed for an inbound channel?".to_owned())));
		}
		if !matches!(self.context.channel_state, ChannelState::FundingNegotiated) {
			return Err((self, ChannelError::Close("Received funding_signed in strange state!".to_owned())));
		}
		if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
				self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
				self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
			panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
		}

		let funding_script = self.context.get_funding_redeemscript();

		let counterparty_keys = self.context.build_remote_transaction_keys();
		let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
		let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
		let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();

		log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
			&self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));

		let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
		let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
		{
			let trusted_tx = initial_commitment_tx.trust();
			let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
			let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
			// They sign our commitment transaction, allowing us to broadcast the tx if we wish.
			if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
				return Err((self, ChannelError::Close("Invalid funding_signed signature from peer".to_owned())));
			}
		}

		let holder_commitment_tx = HolderCommitmentTransaction::new(
			initial_commitment_tx,
			msg.signature,
			Vec::new(),
			&self.context.get_holder_pubkeys().funding_pubkey,
			self.context.counterparty_funding_pubkey()
		);

		let validated =
			self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new());
		if validated.is_err() {
			return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
		}

		let funding_redeemscript = self.context.get_funding_redeemscript();
		let funding_txo = self.context.get_funding_txo().unwrap();
		let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
		let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
		let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
		let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
		monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
		let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
			shutdown_script, self.context.get_holder_selected_contest_delay(),
			&self.context.destination_script, (funding_txo, funding_txo_script),
			&self.context.channel_transaction_parameters,
			funding_redeemscript.clone(), self.context.channel_value_satoshis,
			obscure_factor,
			holder_commitment_tx, best_block, self.context.counterparty_node_id);
		channel_monitor.provide_initial_counterparty_commitment_tx(
			counterparty_initial_bitcoin_tx.txid, Vec::new(),
			self.context.cur_counterparty_commitment_transaction_number,
			self.context.counterparty_cur_commitment_point.unwrap(),
			counterparty_initial_commitment_tx.feerate_per_kw(),
			counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
			counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);

		assert!(!self.context.channel_state.is_monitor_update_in_progress()); // We have not had any monitor(s) yet to fail an update!
		if self.context.is_batch_funding() {
			self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH);
		} else {
			self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
		}
		self.context.cur_holder_commitment_transaction_number -= 1;
		self.context.cur_counterparty_commitment_transaction_number -= 1;

		log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());

		let mut channel = Channel { context: self.context };

		let need_channel_ready = channel.check_get_channel_ready(0).is_some();
		channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
		Ok((channel, channel_monitor))
	}
	/// Indicates that the signer may have some signatures for us, so we should retry if we're
	/// blocked.
	#[cfg(async_signing)]
	pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
		if self.context.signer_pending_funding && self.context.is_outbound() {
			log_trace!(logger, "Signer unblocked a funding_created");
			self.get_funding_created_msg(logger)
		} else { None }
	}
}
/// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
pub(super) struct InboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
	pub context: ChannelContext<SP>,
	pub unfunded_context: UnfundedChannelContext,
}
impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
	/// Creates a new channel from a remote side's request for one.
	/// Assumes chain_hash has already been checked and corresponds with what we expect!
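	///
	/// A sketch of handling an incoming open_channel (hypothetical bindings for everything but
	/// the constructor itself; `msg` is the received `msgs::OpenChannel`):
	///
	/// ```ignore
	/// let mut chan = InboundV1Channel::new(
	/// 	&fee_estimator, &entropy_source, &signer_provider, counterparty_node_id,
	/// 	&our_supported_features, &their_features, &msg, user_channel_id, &config,
	/// 	best_block_height, &logger, /*is_0conf=*/false,
	/// )?;
	/// // If the user accepts, send `chan.accept_inbound_channel()` back to the peer.
	/// ```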
	pub fn new<ES: Deref, F: Deref, L: Deref>(
		fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
		counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
		their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
		current_chain_height: u32, logger: &L, is_0conf: bool,
	) -> Result<InboundV1Channel<SP>, ChannelError>
	where ES::Target: EntropySource,
	      F::Target: FeeEstimator,
	      L::Target: Logger,
	{
		let logger = WithContext::from(logger, Some(counterparty_node_id), Some(msg.temporary_channel_id));
		let announced_channel = (msg.channel_flags & 1) == 1;

		// First check the channel type is known, failing before we do anything else if we don't
		// support this channel type.
		let channel_type = if let Some(channel_type) = &msg.channel_type {
			if channel_type.supports_any_optional_bits() {
				return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
			}

			// We only support the channel types defined by the `ChannelManager` in
			// `provided_channel_type_features`. The channel type must always support
			// `static_remote_key`.
			if !channel_type.requires_static_remote_key() {
				return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
			}
			// Make sure we support all of the features behind the channel type.
			if !channel_type.is_subset(our_supported_features) {
				return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
			}
			if channel_type.requires_scid_privacy() && announced_channel {
				return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
			}
			channel_type.clone()
		} else {
			let channel_type = ChannelTypeFeatures::from_init(&their_features);
			if channel_type != ChannelTypeFeatures::only_static_remote_key() {
				return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
			}
			channel_type
		};

		let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id);
		let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id);
		let pubkeys = holder_signer.pubkeys().clone();
		let counterparty_pubkeys = ChannelPublicKeys {
			funding_pubkey: msg.funding_pubkey,
			revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
			payment_point: msg.payment_point,
			delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
			htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
		};

		if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
			return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
		}

		// Check sanity of message fields:
		if msg.funding_satoshis > config.channel_handshake_limits.max_funding_satoshis {
			return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.channel_handshake_limits.max_funding_satoshis, msg.funding_satoshis)));
		}
		if msg.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
			return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.funding_satoshis)));
		}
		if msg.channel_reserve_satoshis > msg.funding_satoshis {
			return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.funding_satoshis)));
		}
		let full_channel_value_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000;
		if msg.push_msat > full_channel_value_msat {
			return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg.push_msat, full_channel_value_msat)));
		}
		if msg.dust_limit_satoshis > msg.funding_satoshis {
			return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than funding_satoshis {}. Peer never wants payout outputs?", msg.dust_limit_satoshis, msg.funding_satoshis)));
		}
		if msg.htlc_minimum_msat >= full_channel_value_msat {
			return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
		}
		Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, msg.feerate_per_kw, None, &&logger)?;

		let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
		if msg.to_self_delay > max_counterparty_selected_contest_delay {
			return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, msg.to_self_delay)));
		}
		if msg.max_accepted_htlcs < 1 {
			return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
		}
		if msg.max_accepted_htlcs > MAX_HTLCS {
			return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
		}

		// Now check against optional parameters as set by config...
		if msg.funding_satoshis < config.channel_handshake_limits.min_funding_satoshis {
			return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", msg.funding_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
		}
		if msg.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
			return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
		}
		if msg.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
			return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
		}
		if msg.channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
			return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
		}
		if msg.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
			return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
		}
		if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
			return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
		}
		if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
			return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
		}

		// Convert things into internal flags and prep our state:

		if config.channel_handshake_limits.force_announced_channel_preference {
			if config.channel_handshake_config.announced_channel != announced_channel {
				return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
			}
		}

		let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis, config);
		if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
			// Protocol level safety check in place, although it should never happen because
			// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
			return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
		}
		if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
			return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg.push_msat)));
		}
		if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
			log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
				msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
		}
		if holder_selected_channel_reserve_satoshis < msg.dust_limit_satoshis {
			return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
		}

		// check if the funder's amount for the initial commitment tx is sufficient
		// for full fee payment plus a few HTLCs to ensure the channel will be useful.
		let anchor_outputs_value = if channel_type.supports_anchors_zero_fee_htlc_tx() {
			ANCHOR_OUTPUT_VALUE_SATOSHI * 2
		} else {
			0
		};
		let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat;
		let commitment_tx_fee = commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000;
		if (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value) < commitment_tx_fee {
			return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee)));
		}

		let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee - anchor_outputs_value;
		// While it's reasonable for us to not meet the channel reserve initially (if they don't
		// want to push much to us), our counterparty should always have more than our reserve.
		if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
			return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
		}
7009 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
7010 match &msg.shutdown_scriptpubkey {
7011 &Some(ref script) => {
7012 // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything
7013 if script.len() == 0 {
7016 if !script::is_bolt2_compliant(&script, their_features) {
7017 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
7019 Some(script.clone())
7022 // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
7024 return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
7029 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
7030 match signer_provider.get_shutdown_scriptpubkey() {
7031 Ok(scriptpubkey) => Some(scriptpubkey),
7032 Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
7036 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
7037 if !shutdown_scriptpubkey.is_compatible(&their_features) {
7038 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
7042 let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
7043 Ok(script) => script,
7044 Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
7047 let mut secp_ctx = Secp256k1::new();
7048 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
7050 let minimum_depth = if is_0conf {
7053 Some(cmp::max(config.channel_handshake_config.minimum_depth, 1))
7057 context: ChannelContext {
7060 config: LegacyChannelConfig {
7061 options: config.channel_config.clone(),
7063 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
7068 inbound_handshake_limits_override: None,
7070 temporary_channel_id: Some(msg.temporary_channel_id),
7071 channel_id: msg.temporary_channel_id,
7072 channel_state: ChannelState::NegotiatingFunding(
7073 NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
7075 announcement_sigs_state: AnnouncementSigsState::NotSent,
7078 latest_monitor_update_id: 0,
7080 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
7081 shutdown_scriptpubkey,
7084 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
7085 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
7086 value_to_self_msat: msg.push_msat,
7088 pending_inbound_htlcs: Vec::new(),
7089 pending_outbound_htlcs: Vec::new(),
7090 holding_cell_htlc_updates: Vec::new(),
7091 pending_update_fee: None,
7092 holding_cell_update_fee: None,
7093 next_holder_htlc_id: 0,
7094 next_counterparty_htlc_id: 0,
7095 update_time_counter: 1,
7097 resend_order: RAACommitmentOrder::CommitmentFirst,
7099 monitor_pending_channel_ready: false,
7100 monitor_pending_revoke_and_ack: false,
7101 monitor_pending_commitment_signed: false,
7102 monitor_pending_forwards: Vec::new(),
7103 monitor_pending_failures: Vec::new(),
7104 monitor_pending_finalized_fulfills: Vec::new(),
7106 signer_pending_commitment_update: false,
7107 signer_pending_funding: false,
7109 #[cfg(debug_assertions)]
7110 holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
7111 #[cfg(debug_assertions)]
7112 counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
7114 last_sent_closing_fee: None,
7115 pending_counterparty_closing_signed: None,
7116 expecting_peer_commitment_signed: false,
7117 closing_fee_limits: None,
7118 target_closing_feerate_sats_per_kw: None,
7120 funding_tx_confirmed_in: None,
7121 funding_tx_confirmation_height: 0,
7122 short_channel_id: None,
7123 channel_creation_height: current_chain_height,
7125 feerate_per_kw: msg.feerate_per_kw,
7126 channel_value_satoshis: msg.funding_satoshis,
7127 counterparty_dust_limit_satoshis: msg.dust_limit_satoshis,
7128 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
7129 counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000),
7130 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config),
7131 counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis),
7132 holder_selected_channel_reserve_satoshis,
7133 counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
7134 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
7135 counterparty_max_accepted_htlcs: msg.max_accepted_htlcs,
7136 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
7139 counterparty_forwarding_info: None,
7141 channel_transaction_parameters: ChannelTransactionParameters {
7142 holder_pubkeys: pubkeys,
7143 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
7144 is_outbound_from_holder: false,
7145 counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
7146 selected_contest_delay: msg.to_self_delay,
7147 pubkeys: counterparty_pubkeys,
7149 funding_outpoint: None,
7150 channel_type_features: channel_type.clone()
7152 funding_transaction: None,
7153 is_batch_funding: None,
7155 counterparty_cur_commitment_point: Some(msg.first_per_commitment_point),
7156 counterparty_prev_commitment_point: None,
7157 counterparty_node_id,
7159 counterparty_shutdown_scriptpubkey,
7161 commitment_secrets: CounterpartyCommitmentSecrets::new(),
7163 channel_update_status: ChannelUpdateStatus::Enabled,
7164 closing_signed_in_flight: false,
7166 announcement_sigs: None,
7168 #[cfg(any(test, fuzzing))]
7169 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
7170 #[cfg(any(test, fuzzing))]
7171 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
7173 workaround_lnd_bug_4006: None,
7174 sent_message_awaiting_response: None,
7176 latest_inbound_scid_alias: None,
7177 outbound_scid_alias: 0,
7179 channel_pending_event_emitted: false,
7180 channel_ready_event_emitted: false,
7182 #[cfg(any(test, fuzzing))]
7183 historical_inbound_htlc_fulfills: HashSet::new(),
7188 blocked_monitor_updates: Vec::new(),
7190 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
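// A minimal sketch (not from the original file) of the balance bookkeeping
// used in the initializer above: on an inbound channel our balance starts at
// the counterparty's `push_msat`, and both balances are tracked in
// millisatoshis while the funding amount is denominated in satoshis.
#[cfg(test)]
fn initial_balances_msat_sketch(funding_satoshis: u64, push_msat: u64) -> (u64, u64) {
	let channel_value_msat = funding_satoshis * 1000;
	debug_assert!(push_msat <= channel_value_msat);
	// (our starting balance, their starting balance), before fees and HTLCs.
	(push_msat, channel_value_msat - push_msat)
}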
7196 /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
7197 /// should be sent back to the counterparty node.
7199 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7200 pub fn accept_inbound_channel(&mut self) -> msgs::AcceptChannel {
7201 if self.context.is_outbound() {
7202 panic!("Tried to send accept_channel for an outbound channel?");
7204 if !matches!(
7205 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
7206 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
7207 ) {
7208 panic!("Tried to send accept_channel after channel had moved forward");
7210 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7211 panic!("Tried to send an accept_channel for a channel that has already advanced");
7214 self.generate_accept_channel_message()
7217 /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
7218 /// inbound channel. If the intention is to accept an inbound channel, use
7219 /// [`InboundV1Channel::accept_inbound_channel`] instead.
7221 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7222 fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
7223 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
7224 let keys = self.context.get_holder_pubkeys();
7226 msgs::AcceptChannel {
7227 temporary_channel_id: self.context.channel_id,
7228 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
7229 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
7230 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
7231 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
7232 minimum_depth: self.context.minimum_depth.unwrap(),
7233 to_self_delay: self.context.get_holder_selected_contest_delay(),
7234 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
7235 funding_pubkey: keys.funding_pubkey,
7236 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
7237 payment_point: keys.payment_point,
7238 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
7239 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
7240 first_per_commitment_point,
7241 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
7242 Some(script) => script.clone().into_inner(),
7243 None => Builder::new().into_script(),
7245 channel_type: Some(self.context.channel_type.clone()),
7247 next_local_nonce: None,
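// Illustrative aside (test-only sketch): per BOLT 2, opting out of an upfront
// shutdown script is signalled by sending a zero-length script rather than by
// omitting the field, which is why `Builder::new().into_script()` is used as
// the fallback above.
#[cfg(test)]
fn empty_upfront_shutdown_script_sketch() {
	let opt_out_script = Builder::new().into_script();
	assert!(opt_out_script.is_empty());
}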
7251 /// Allows tests to extract a [`msgs::AcceptChannel`] message for an inbound
7252 /// channel without accepting it.
7254 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7256 pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel {
7257 self.generate_accept_channel_message()
7260 fn check_funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<CommitmentTransaction, ChannelError> where L::Target: Logger {
7261 let funding_script = self.context.get_funding_redeemscript();
7263 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
7264 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
7265 let trusted_tx = initial_commitment_tx.trust();
7266 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
7267 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
7268 // They sign the holder commitment transaction...
7269 log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
7270 log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
7271 encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
7272 encode::serialize_hex(&funding_script), &self.context.channel_id());
7273 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
7275 Ok(initial_commitment_tx)
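// A self-contained sketch of the verification step above, assuming only that
// the sighash is some 32-byte digest: a signature produced over the digest by
// the counterparty's secret key must verify against their public key via
// `verify_ecdsa`, exactly as `secp_check!` does for the funding signature.
#[cfg(test)]
fn verify_counterparty_sig_sketch() {
	let secp_ctx = Secp256k1::new();
	let counterparty_sk = SecretKey::from_slice(&[42; 32]).unwrap();
	let counterparty_pk = PublicKey::from_secret_key(&secp_ctx, &counterparty_sk);
	let digest = [3u8; 32]; // stand-in for the commitment transaction sighash
	let msg = secp256k1::Message::from_slice(&digest).unwrap();
	let sig = secp_ctx.sign_ecdsa(&msg, &counterparty_sk);
	assert!(secp_ctx.verify_ecdsa(&msg, &sig, &counterparty_pk).is_ok());
}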
7278 pub fn funding_created<L: Deref>(
7279 mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
7280 ) -> Result<(Channel<SP>, Option<msgs::FundingSigned>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (Self, ChannelError)>
7284 if self.context.is_outbound() {
7285 return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned())));
7287 if !matches!(
7288 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
7289 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
7290 ) {
7291 // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
7292 // remember the channel, so it's safe to just send an error_message here and drop the
7293 // channel entirely.
7294 return Err((self, ChannelError::Close("Received funding_created after we got the channel!".to_owned())));
7296 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
7297 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
7298 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7299 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
7302 let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
7303 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
7304 // This is an externally observable change before we finish all our checks. In particular
7305 // check_funding_created_signature may fail.
7306 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
7308 let initial_commitment_tx = match self.check_funding_created_signature(&msg.signature, logger) {
7309 Ok(res) => res,
7310 Err(ChannelError::Close(e)) => {
7311 self.context.channel_transaction_parameters.funding_outpoint = None;
7312 return Err((self, ChannelError::Close(e)));
7313 },
7314 Err(e) => {
7315 // The only error we know how to handle is ChannelError::Close, so we fall over here
7316 // to make sure we don't continue with an inconsistent state.
7317 panic!("unexpected error type from check_funding_created_signature {:?}", e);
7318 }
7319 };
7321 let holder_commitment_tx = HolderCommitmentTransaction::new(
7322 initial_commitment_tx,
7325 &self.context.get_holder_pubkeys().funding_pubkey,
7326 self.context.counterparty_funding_pubkey()
7329 if let Err(_) = self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new()) {
7330 return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
7333 // Now that we're past error-generating stuff, update our local state:
7335 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
7336 self.context.channel_id = funding_txo.to_channel_id();
7337 self.context.cur_counterparty_commitment_transaction_number -= 1;
7338 self.context.cur_holder_commitment_transaction_number -= 1;
7340 let (counterparty_initial_commitment_tx, funding_signed) = self.context.get_funding_signed_msg(logger);
7342 let funding_redeemscript = self.context.get_funding_redeemscript();
7343 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
7344 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
7345 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
7346 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
7347 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
7348 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
7349 shutdown_script, self.context.get_holder_selected_contest_delay(),
7350 &self.context.destination_script, (funding_txo, funding_txo_script.clone()),
7351 &self.context.channel_transaction_parameters,
7352 funding_redeemscript.clone(), self.context.channel_value_satoshis,
7354 holder_commitment_tx, best_block, self.context.counterparty_node_id);
7355 channel_monitor.provide_initial_counterparty_commitment_tx(
7356 counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
7357 self.context.cur_counterparty_commitment_transaction_number + 1,
7358 self.context.counterparty_cur_commitment_point.unwrap(), self.context.feerate_per_kw,
7359 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
7360 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
7362 log_info!(logger, "{} funding_signed for peer for channel {}",
7363 if funding_signed.is_some() { "Generated" } else { "Waiting for signature on" }, &self.context.channel_id());
7365 // Promote the channel to a full-fledged one now that we have updated the state and have a
7366 // `ChannelMonitor`.
7367 let mut channel = Channel {
7368 context: self.context,
7370 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
7371 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
7373 Ok((channel, funding_signed, channel_monitor))
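// Sketch of the `funding_txo.to_channel_id()` derivation used above (v1
// channels, per BOLT 2): the funding txid with its last two bytes XORed with
// the 16-bit funding output index. Byte order here assumes the txid bytes as
// LDK stores them; illustrative only.
#[cfg(test)]
fn v1_channel_id_sketch(funding_txid_bytes: [u8; 32], funding_output_index: u16) -> [u8; 32] {
	let mut channel_id = funding_txid_bytes;
	channel_id[30] ^= (funding_output_index >> 8) as u8;
	channel_id[31] ^= (funding_output_index & 0xff) as u8;
	channel_id
}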
7377 const SERIALIZATION_VERSION: u8 = 3;
7378 const MIN_SERIALIZATION_VERSION: u8 = 3;
7380 impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
7386 impl Writeable for ChannelUpdateStatus {
7387 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7388 // We only care about writing out the current state as it was announced, ie only either
7389 // Enabled or Disabled. In the case of DisabledStaged, we most recently announced the
7390 // channel as enabled, so we write 0. For EnabledStaged, we similarly write a 1.
7392 ChannelUpdateStatus::Enabled => 0u8.write(writer)?,
7393 ChannelUpdateStatus::DisabledStaged(_) => 0u8.write(writer)?,
7394 ChannelUpdateStatus::EnabledStaged(_) => 1u8.write(writer)?,
7395 ChannelUpdateStatus::Disabled => 1u8.write(writer)?,
7401 impl Readable for ChannelUpdateStatus {
7402 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
7403 Ok(match <u8 as Readable>::read(reader)? {
7404 0 => ChannelUpdateStatus::Enabled,
7405 1 => ChannelUpdateStatus::Disabled,
7406 _ => return Err(DecodeError::InvalidValue),
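// Round-trip sketch (test-only, using `Writeable::encode`): the staged
// variants collapse to their last-announced state on the wire, so only plain
// `Enabled`/`Disabled` ever come back out of `read`.
#[cfg(test)]
fn channel_update_status_round_trip_sketch() {
	let encoded = ChannelUpdateStatus::Enabled.encode();
	assert_eq!(encoded, vec![0u8]);
	let decoded: ChannelUpdateStatus = Readable::read(&mut &encoded[..]).unwrap();
	assert!(matches!(decoded, ChannelUpdateStatus::Enabled));
}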
7411 impl Writeable for AnnouncementSigsState {
7412 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7413 // We only care about writing out the current state as if we had just disconnected, at
7414 // which point we always set anything but PeerReceived to NotSent.
7416 AnnouncementSigsState::NotSent => 0u8.write(writer),
7417 AnnouncementSigsState::MessageSent => 0u8.write(writer),
7418 AnnouncementSigsState::Committed => 0u8.write(writer),
7419 AnnouncementSigsState::PeerReceived => 1u8.write(writer),
7424 impl Readable for AnnouncementSigsState {
7425 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
7426 Ok(match <u8 as Readable>::read(reader)? {
7427 0 => AnnouncementSigsState::NotSent,
7428 1 => AnnouncementSigsState::PeerReceived,
7429 _ => return Err(DecodeError::InvalidValue),
7434 impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
7435 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7436 // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been called.
7439 write_ver_prefix!(writer, MIN_SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
7441 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7442 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write
7443 // the low bytes now and the optional high bytes later.
7444 let user_id_low = self.context.user_id as u64;
7445 user_id_low.write(writer)?;
7447 // Version 1 deserializers expected to read parts of the config object here. Version 2
7448 // deserializers (0.0.99) now read config through TLVs, and as we now require them for
7449 // `minimum_depth` we simply write dummy values here.
7450 writer.write_all(&[0; 8])?;
7452 self.context.channel_id.write(writer)?;
7454 let mut channel_state = self.context.channel_state;
7455 if matches!(channel_state, ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)) {
7456 channel_state.set_peer_disconnected();
7458 channel_state.to_u32().write(writer)?;
7460 self.context.channel_value_satoshis.write(writer)?;
7462 self.context.latest_monitor_update_id.write(writer)?;
7464 // Write out the old serialization for shutdown_pubkey for backwards compatibility, if
7465 // deserialized from that format.
7466 match self.context.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) {
7467 Some(shutdown_pubkey) => shutdown_pubkey.write(writer)?,
7468 None => [0u8; PUBLIC_KEY_SIZE].write(writer)?,
7470 self.context.destination_script.write(writer)?;
7472 self.context.cur_holder_commitment_transaction_number.write(writer)?;
7473 self.context.cur_counterparty_commitment_transaction_number.write(writer)?;
7474 self.context.value_to_self_msat.write(writer)?;
7476 let mut dropped_inbound_htlcs = 0;
7477 for htlc in self.context.pending_inbound_htlcs.iter() {
7478 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
7479 dropped_inbound_htlcs += 1;
7482 (self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?;
7483 for htlc in self.context.pending_inbound_htlcs.iter() {
7484 if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state {
7487 htlc.htlc_id.write(writer)?;
7488 htlc.amount_msat.write(writer)?;
7489 htlc.cltv_expiry.write(writer)?;
7490 htlc.payment_hash.write(writer)?;
7492 &InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
7493 &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_state) => {
7495 htlc_state.write(writer)?;
7497 &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_state) => {
7499 htlc_state.write(writer)?;
7501 &InboundHTLCState::Committed => {
7504 &InboundHTLCState::LocalRemoved(ref removal_reason) => {
7506 removal_reason.write(writer)?;
7511 let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];
7512 let mut pending_outbound_skimmed_fees: Vec<Option<u64>> = Vec::new();
7513 let mut pending_outbound_blinding_points: Vec<Option<PublicKey>> = Vec::new();
7515 (self.context.pending_outbound_htlcs.len() as u64).write(writer)?;
7516 for htlc in self.context.pending_outbound_htlcs.iter() {
7517 htlc.htlc_id.write(writer)?;
7518 htlc.amount_msat.write(writer)?;
7519 htlc.cltv_expiry.write(writer)?;
7520 htlc.payment_hash.write(writer)?;
7521 htlc.source.write(writer)?;
7523 &OutboundHTLCState::LocalAnnounced(ref onion_packet) => {
7525 onion_packet.write(writer)?;
7527 &OutboundHTLCState::Committed => {
7530 &OutboundHTLCState::RemoteRemoved(_) => {
7531 // Treat this as a Committed because we haven't received the CS - they'll
7532 // resend the claim/fail on reconnect, at which point we'll (hopefully) receive the missing CS.
7535 &OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref outcome) => {
7537 if let OutboundHTLCOutcome::Success(preimage) = outcome {
7538 preimages.push(preimage);
7540 let reason: Option<&HTLCFailReason> = outcome.into();
7541 reason.write(writer)?;
7543 &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) => {
7545 if let OutboundHTLCOutcome::Success(preimage) = outcome {
7546 preimages.push(preimage);
7548 let reason: Option<&HTLCFailReason> = outcome.into();
7549 reason.write(writer)?;
7552 pending_outbound_skimmed_fees.push(htlc.skimmed_fee_msat);
7553 pending_outbound_blinding_points.push(htlc.blinding_point);
7556 let mut holding_cell_skimmed_fees: Vec<Option<u64>> = Vec::new();
7557 let mut holding_cell_blinding_points: Vec<Option<PublicKey>> = Vec::new();
7558 // Vec of (htlc_id, failure_code, sha256_of_onion)
7559 let mut malformed_htlcs: Vec<(u64, u16, [u8; 32])> = Vec::new();
7560 (self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
7561 for update in self.context.holding_cell_htlc_updates.iter() {
7563 &HTLCUpdateAwaitingACK::AddHTLC {
7564 ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
7565 blinding_point, skimmed_fee_msat,
7568 amount_msat.write(writer)?;
7569 cltv_expiry.write(writer)?;
7570 payment_hash.write(writer)?;
7571 source.write(writer)?;
7572 onion_routing_packet.write(writer)?;
7574 holding_cell_skimmed_fees.push(skimmed_fee_msat);
7575 holding_cell_blinding_points.push(blinding_point);
7577 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
7579 payment_preimage.write(writer)?;
7580 htlc_id.write(writer)?;
7582 &HTLCUpdateAwaitingACK::FailHTLC { ref htlc_id, ref err_packet } => {
7584 htlc_id.write(writer)?;
7585 err_packet.write(writer)?;
7587 &HTLCUpdateAwaitingACK::FailMalformedHTLC {
7588 htlc_id, failure_code, sha256_of_onion
7590 // We don't want to break downgrading by adding a new variant, so write a dummy
7591 // `::FailHTLC` variant and write the real malformed error as an optional TLV.
7592 malformed_htlcs.push((htlc_id, failure_code, sha256_of_onion));
7594 let dummy_err_packet = msgs::OnionErrorPacket { data: Vec::new() };
7596 htlc_id.write(writer)?;
7597 dummy_err_packet.write(writer)?;
7602 match self.context.resend_order {
7603 RAACommitmentOrder::CommitmentFirst => 0u8.write(writer)?,
7604 RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?,
7607 self.context.monitor_pending_channel_ready.write(writer)?;
7608 self.context.monitor_pending_revoke_and_ack.write(writer)?;
7609 self.context.monitor_pending_commitment_signed.write(writer)?;
7611 (self.context.monitor_pending_forwards.len() as u64).write(writer)?;
7612 for &(ref pending_forward, ref htlc_id) in self.context.monitor_pending_forwards.iter() {
7613 pending_forward.write(writer)?;
7614 htlc_id.write(writer)?;
7617 (self.context.monitor_pending_failures.len() as u64).write(writer)?;
7618 for &(ref htlc_source, ref payment_hash, ref fail_reason) in self.context.monitor_pending_failures.iter() {
7619 htlc_source.write(writer)?;
7620 payment_hash.write(writer)?;
7621 fail_reason.write(writer)?;
7624 if self.context.is_outbound() {
7625 self.context.pending_update_fee.map(|(a, _)| a).write(writer)?;
7626 } else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.context.pending_update_fee {
7627 Some(feerate).write(writer)?;
7629 // As for inbound HTLCs, if the update was only announced and never committed in a
7630 // commitment_signed, drop it.
7631 None::<u32>.write(writer)?;
7633 self.context.holding_cell_update_fee.write(writer)?;
7635 self.context.next_holder_htlc_id.write(writer)?;
7636 (self.context.next_counterparty_htlc_id - dropped_inbound_htlcs).write(writer)?;
7637 self.context.update_time_counter.write(writer)?;
7638 self.context.feerate_per_kw.write(writer)?;
7640 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7641 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7642 // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7643 // consider the stale state on reload.
7646 self.context.funding_tx_confirmed_in.write(writer)?;
7647 self.context.funding_tx_confirmation_height.write(writer)?;
7648 self.context.short_channel_id.write(writer)?;
7650 self.context.counterparty_dust_limit_satoshis.write(writer)?;
7651 self.context.holder_dust_limit_satoshis.write(writer)?;
7652 self.context.counterparty_max_htlc_value_in_flight_msat.write(writer)?;
7654 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7655 self.context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0).write(writer)?;
7657 self.context.counterparty_htlc_minimum_msat.write(writer)?;
7658 self.context.holder_htlc_minimum_msat.write(writer)?;
7659 self.context.counterparty_max_accepted_htlcs.write(writer)?;
7661 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7662 self.context.minimum_depth.unwrap_or(0).write(writer)?;
7664 match &self.context.counterparty_forwarding_info {
7667 info.fee_base_msat.write(writer)?;
7668 info.fee_proportional_millionths.write(writer)?;
7669 info.cltv_expiry_delta.write(writer)?;
7671 None => 0u8.write(writer)?
7674 self.context.channel_transaction_parameters.write(writer)?;
7675 self.context.funding_transaction.write(writer)?;
7677 self.context.counterparty_cur_commitment_point.write(writer)?;
7678 self.context.counterparty_prev_commitment_point.write(writer)?;
7679 self.context.counterparty_node_id.write(writer)?;
7681 self.context.counterparty_shutdown_scriptpubkey.write(writer)?;
7683 self.context.commitment_secrets.write(writer)?;
7685 self.context.channel_update_status.write(writer)?;
7687 #[cfg(any(test, fuzzing))]
7688 (self.context.historical_inbound_htlc_fulfills.len() as u64).write(writer)?;
7689 #[cfg(any(test, fuzzing))]
7690 for htlc in self.context.historical_inbound_htlc_fulfills.iter() {
7691 htlc.write(writer)?;
7694 // If the channel type is something other than only-static-remote-key, then we need to have
7695 // older clients fail to deserialize this channel at all. If the type is
7696 // only-static-remote-key, we simply consider it "default" and don't write the channel type at all.
7698 let chan_type = if self.context.channel_type != ChannelTypeFeatures::only_static_remote_key() {
7699 Some(&self.context.channel_type) } else { None };
7701 // The same logic applies for `holder_selected_channel_reserve_satoshis` values other than
7702 // the default, and when `holder_max_htlc_value_in_flight_msat` is configured to a
7703 // percentage of the channel value other than 10%, which is what older versions of LDK
7704 // used to set it to before the percentage was made configurable.
7705 let serialized_holder_selected_reserve =
7706 if self.context.holder_selected_channel_reserve_satoshis != get_legacy_default_holder_selected_channel_reserve_satoshis(self.context.channel_value_satoshis)
7707 { Some(self.context.holder_selected_channel_reserve_satoshis) } else { None };
7709 let mut old_max_in_flight_percent_config = UserConfig::default().channel_handshake_config;
7710 old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = MAX_IN_FLIGHT_PERCENT_LEGACY;
7711 let serialized_holder_htlc_max_in_flight =
7712 if self.context.holder_max_htlc_value_in_flight_msat != get_holder_max_htlc_value_in_flight_msat(self.context.channel_value_satoshis, &old_max_in_flight_percent_config)
7713 { Some(self.context.holder_max_htlc_value_in_flight_msat) } else { None };
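// The pattern behind both computations above, in miniature (assumed helper,
// not in the original): serialize `None` whenever the value equals the legacy
// default, so old readers that reconstruct the default themselves still
// arrive at the right value.
fn elide_legacy_default<T: PartialEq>(value: T, legacy_default: T) -> Option<T> {
	if value != legacy_default { Some(value) } else { None }
}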
7715 let channel_pending_event_emitted = Some(self.context.channel_pending_event_emitted);
7716 let channel_ready_event_emitted = Some(self.context.channel_ready_event_emitted);
7718 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7719 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. Therefore,
7720 // we write the high bytes as an option here.
7721 let user_id_high_opt = Some((self.context.user_id >> 64) as u64);
7723 let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.context.holder_max_accepted_htlcs) };
7725 write_tlv_fields!(writer, {
7726 (0, self.context.announcement_sigs, option),
7727 // minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
7728 // default value instead of being Option<>al. Thus, to maintain compatibility we write
7729 // them twice, once with their original default values above, and once as an option
7730 // here. On the read side, old versions will simply ignore the odd-type entries here,
7731 // and new versions map the default values to None and allow the TLV entries here to override them.
7733 (1, self.context.minimum_depth, option),
7734 (2, chan_type, option),
7735 (3, self.context.counterparty_selected_channel_reserve_satoshis, option),
7736 (4, serialized_holder_selected_reserve, option),
7737 (5, self.context.config, required),
7738 (6, serialized_holder_htlc_max_in_flight, option),
7739 (7, self.context.shutdown_scriptpubkey, option),
7740 (8, self.context.blocked_monitor_updates, optional_vec),
7741 (9, self.context.target_closing_feerate_sats_per_kw, option),
7742 (11, self.context.monitor_pending_finalized_fulfills, required_vec),
7743 (13, self.context.channel_creation_height, required),
7744 (15, preimages, required_vec),
7745 (17, self.context.announcement_sigs_state, required),
7746 (19, self.context.latest_inbound_scid_alias, option),
7747 (21, self.context.outbound_scid_alias, required),
7748 (23, channel_ready_event_emitted, option),
7749 (25, user_id_high_opt, option),
7750 (27, self.context.channel_keys_id, required),
7751 (28, holder_max_accepted_htlcs, option),
7752 (29, self.context.temporary_channel_id, option),
7753 (31, channel_pending_event_emitted, option),
7754 (35, pending_outbound_skimmed_fees, optional_vec),
7755 (37, holding_cell_skimmed_fees, optional_vec),
7756 (38, self.context.is_batch_funding, option),
7757 (39, pending_outbound_blinding_points, optional_vec),
7758 (41, holding_cell_blinding_points, optional_vec),
7759 (43, malformed_htlcs, optional_vec), // Added in 0.0.119
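// The compatibility rule the TLV stream above leans on, stated as code
// (illustrative helper, not in the original): odd TLV types may be skipped by
// readers that don't understand them, while even types are mandatory and make
// old readers reject the record entirely.
#[cfg(test)]
fn tlv_type_is_skippable_by_old_readers(tlv_type: u64) -> bool {
	tlv_type % 2 == 1
}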
7766 const MAX_ALLOC_SIZE: usize = 64*1024;
7767 impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel<SP>
7769 ES::Target: EntropySource,
7770 SP::Target: SignerProvider
7772 fn read<R : io::Read>(reader: &mut R, args: (&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)) -> Result<Self, DecodeError> {
7773 let (entropy_source, signer_provider, serialized_height, our_supported_features) = args;
7774 let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
7776 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7777 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We read
7778 // the low bytes now and the high bytes later.
7779 let user_id_low: u64 = Readable::read(reader)?;
7781 let mut config = Some(LegacyChannelConfig::default());
7783 // Read the old serialization of the ChannelConfig from version 0.0.98.
7784 config.as_mut().unwrap().options.forwarding_fee_proportional_millionths = Readable::read(reader)?;
7785 config.as_mut().unwrap().options.cltv_expiry_delta = Readable::read(reader)?;
7786 config.as_mut().unwrap().announced_channel = Readable::read(reader)?;
7787 config.as_mut().unwrap().commit_upfront_shutdown_pubkey = Readable::read(reader)?;
7789 // Read the 8 bytes of backwards-compatibility ChannelConfig data.
7790 let mut _val: u64 = Readable::read(reader)?;
7793 let channel_id = Readable::read(reader)?;
7794 let channel_state = ChannelState::from_u32(Readable::read(reader)?).map_err(|_| DecodeError::InvalidValue)?;
7795 let channel_value_satoshis = Readable::read(reader)?;
7797 let latest_monitor_update_id = Readable::read(reader)?;
7799 let mut keys_data = None;
7801 // Read the serialized signer bytes. We'll choose to deserialize them or not based on whether
7802 // the `channel_keys_id` TLV is present below.
7803 let keys_len: u32 = Readable::read(reader)?;
7804 keys_data = Some(Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE)));
7805 while keys_data.as_ref().unwrap().len() != keys_len as usize {
7806 // Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
7807 let mut data = [0; 1024];
7808 let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.as_ref().unwrap().len())];
7809 reader.read_exact(read_slice)?;
7810 keys_data.as_mut().unwrap().extend_from_slice(read_slice);
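// The loop above in isolation (sketch over an assumed generic reader): cap
// the up-front allocation at MAX_ALLOC_SIZE and read fixed-size chunks, so a
// corrupted length prefix can never force a multi-gigabyte allocation.
fn read_length_prefixed_bounded<R: io::Read>(reader: &mut R, claimed_len: usize) -> Result<Vec<u8>, io::Error> {
	let mut out = Vec::with_capacity(cmp::min(claimed_len, MAX_ALLOC_SIZE));
	let mut chunk = [0u8; 1024];
	while out.len() != claimed_len {
		let take = cmp::min(chunk.len(), claimed_len - out.len());
		reader.read_exact(&mut chunk[..take])?;
		out.extend_from_slice(&chunk[..take]);
	}
	Ok(out)
}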
7814 // Read the old serialization for shutdown_pubkey, preferring the TLV field later if set.
7815 let mut shutdown_scriptpubkey = match <PublicKey as Readable>::read(reader) {
7816 Ok(pubkey) => Some(ShutdownScript::new_p2wpkh_from_pubkey(pubkey)),
7819 let destination_script = Readable::read(reader)?;
7821 let cur_holder_commitment_transaction_number = Readable::read(reader)?;
7822 let cur_counterparty_commitment_transaction_number = Readable::read(reader)?;
7823 let value_to_self_msat = Readable::read(reader)?;
7825 let pending_inbound_htlc_count: u64 = Readable::read(reader)?;
7827 let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7828 for _ in 0..pending_inbound_htlc_count {
7829 pending_inbound_htlcs.push(InboundHTLCOutput {
7830 htlc_id: Readable::read(reader)?,
7831 amount_msat: Readable::read(reader)?,
7832 cltv_expiry: Readable::read(reader)?,
7833 payment_hash: Readable::read(reader)?,
7834 state: match <u8 as Readable>::read(reader)? {
7835 1 => InboundHTLCState::AwaitingRemoteRevokeToAnnounce(Readable::read(reader)?),
7836 2 => InboundHTLCState::AwaitingAnnouncedRemoteRevoke(Readable::read(reader)?),
7837 3 => InboundHTLCState::Committed,
7838 4 => InboundHTLCState::LocalRemoved(Readable::read(reader)?),
7839 _ => return Err(DecodeError::InvalidValue),
7844 let pending_outbound_htlc_count: u64 = Readable::read(reader)?;
7845 let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7846 for _ in 0..pending_outbound_htlc_count {
7847 pending_outbound_htlcs.push(OutboundHTLCOutput {
7848 htlc_id: Readable::read(reader)?,
7849 amount_msat: Readable::read(reader)?,
7850 cltv_expiry: Readable::read(reader)?,
7851 payment_hash: Readable::read(reader)?,
7852 source: Readable::read(reader)?,
7853 state: match <u8 as Readable>::read(reader)? {
7854 0 => OutboundHTLCState::LocalAnnounced(Box::new(Readable::read(reader)?)),
7855 1 => OutboundHTLCState::Committed,
7857 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7858 OutboundHTLCState::RemoteRemoved(option.into())
7861 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7862 OutboundHTLCState::AwaitingRemoteRevokeToRemove(option.into())
7865 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7866 OutboundHTLCState::AwaitingRemovedRemoteRevoke(option.into())
7868 _ => return Err(DecodeError::InvalidValue),
7870 skimmed_fee_msat: None,
7871 blinding_point: None,
7875 let holding_cell_htlc_update_count: u64 = Readable::read(reader)?;
7876 let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, DEFAULT_MAX_HTLCS as usize*2));
7877 for _ in 0..holding_cell_htlc_update_count {
7878 holding_cell_htlc_updates.push(match <u8 as Readable>::read(reader)? {
7879 0 => HTLCUpdateAwaitingACK::AddHTLC {
7880 amount_msat: Readable::read(reader)?,
7881 cltv_expiry: Readable::read(reader)?,
7882 payment_hash: Readable::read(reader)?,
7883 source: Readable::read(reader)?,
7884 onion_routing_packet: Readable::read(reader)?,
7885 skimmed_fee_msat: None,
7886 blinding_point: None,
7888 1 => HTLCUpdateAwaitingACK::ClaimHTLC {
7889 payment_preimage: Readable::read(reader)?,
7890 htlc_id: Readable::read(reader)?,
7892 2 => HTLCUpdateAwaitingACK::FailHTLC {
7893 htlc_id: Readable::read(reader)?,
7894 err_packet: Readable::read(reader)?,
7896 _ => return Err(DecodeError::InvalidValue),
7900 let resend_order = match <u8 as Readable>::read(reader)? {
7901 0 => RAACommitmentOrder::CommitmentFirst,
7902 1 => RAACommitmentOrder::RevokeAndACKFirst,
7903 _ => return Err(DecodeError::InvalidValue),
7906 let monitor_pending_channel_ready = Readable::read(reader)?;
7907 let monitor_pending_revoke_and_ack = Readable::read(reader)?;
7908 let monitor_pending_commitment_signed = Readable::read(reader)?;
7910 let monitor_pending_forwards_count: u64 = Readable::read(reader)?;
7911 let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, DEFAULT_MAX_HTLCS as usize));
7912 for _ in 0..monitor_pending_forwards_count {
7913 monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?));
7916 let monitor_pending_failures_count: u64 = Readable::read(reader)?;
7917 let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, DEFAULT_MAX_HTLCS as usize));
7918 for _ in 0..monitor_pending_failures_count {
7919 monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?));
7922 let pending_update_fee_value: Option<u32> = Readable::read(reader)?;
7924 let holding_cell_update_fee = Readable::read(reader)?;
7926 let next_holder_htlc_id = Readable::read(reader)?;
7927 let next_counterparty_htlc_id = Readable::read(reader)?;
7928 let update_time_counter = Readable::read(reader)?;
7929 let feerate_per_kw = Readable::read(reader)?;
7931 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7932 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7933 // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7934 // consider the stale state on reload.
7935 match <u8 as Readable>::read(reader)? {
7938 let _: u32 = Readable::read(reader)?;
7939 let _: u64 = Readable::read(reader)?;
7940 let _: Signature = Readable::read(reader)?;
7942 _ => return Err(DecodeError::InvalidValue),
7945 let funding_tx_confirmed_in = Readable::read(reader)?;
7946 let funding_tx_confirmation_height = Readable::read(reader)?;
7947 let short_channel_id = Readable::read(reader)?;
7949 let counterparty_dust_limit_satoshis = Readable::read(reader)?;
7950 let holder_dust_limit_satoshis = Readable::read(reader)?;
7951 let counterparty_max_htlc_value_in_flight_msat = Readable::read(reader)?;
7952 let mut counterparty_selected_channel_reserve_satoshis = None;
7954 // Read the old serialization from version 0.0.98.
7955 counterparty_selected_channel_reserve_satoshis = Some(Readable::read(reader)?);
7957 // Read the 8 bytes of backwards-compatibility data.
7958 let _dummy: u64 = Readable::read(reader)?;
7960 let counterparty_htlc_minimum_msat = Readable::read(reader)?;
7961 let holder_htlc_minimum_msat = Readable::read(reader)?;
7962 let counterparty_max_accepted_htlcs = Readable::read(reader)?;
7964 let mut minimum_depth = None;
7966 // Read the old serialization from version 0.0.98.
7967 minimum_depth = Some(Readable::read(reader)?);
7969 // Read the 4 bytes of backwards-compatibility data.
7970 let _dummy: u32 = Readable::read(reader)?;
7973 let counterparty_forwarding_info = match <u8 as Readable>::read(reader)? {
7975 1 => Some(CounterpartyForwardingInfo {
7976 fee_base_msat: Readable::read(reader)?,
7977 fee_proportional_millionths: Readable::read(reader)?,
7978 cltv_expiry_delta: Readable::read(reader)?,
7980 _ => return Err(DecodeError::InvalidValue),
7983 let mut channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
7984 let funding_transaction: Option<Transaction> = Readable::read(reader)?;
7986 let counterparty_cur_commitment_point = Readable::read(reader)?;
7988 let counterparty_prev_commitment_point = Readable::read(reader)?;
7989 let counterparty_node_id = Readable::read(reader)?;
7991 let counterparty_shutdown_scriptpubkey = Readable::read(reader)?;
7992 let commitment_secrets = Readable::read(reader)?;
7994 let channel_update_status = Readable::read(reader)?;
7996 #[cfg(any(test, fuzzing))]
7997 let mut historical_inbound_htlc_fulfills = HashSet::new();
7998 #[cfg(any(test, fuzzing))]
8000 let htlc_fulfills_len: u64 = Readable::read(reader)?;
8001 for _ in 0..htlc_fulfills_len {
8002 assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?));
8006 let pending_update_fee = if let Some(feerate) = pending_update_fee_value {
8007 Some((feerate, if channel_parameters.is_outbound_from_holder {
8008 FeeUpdateState::Outbound
8010 FeeUpdateState::AwaitingRemoteRevokeToAnnounce
8016 let mut announcement_sigs = None;
8017 let mut target_closing_feerate_sats_per_kw = None;
8018 let mut monitor_pending_finalized_fulfills = Some(Vec::new());
8019 let mut holder_selected_channel_reserve_satoshis = Some(get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis));
8020 let mut holder_max_htlc_value_in_flight_msat = Some(get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().channel_handshake_config));
8021 // Prior to supporting channel type negotiation, all of our channels were static_remotekey
8022 // only, so we default to that if none was written.
8023 let mut channel_type = Some(ChannelTypeFeatures::only_static_remote_key());
8024 let mut channel_creation_height = Some(serialized_height);
8025 let mut preimages_opt: Option<Vec<Option<PaymentPreimage>>> = None;
8027 // If we read an old Channel, for simplicity we just treat it as "we never sent an
8028 // AnnouncementSignatures" which implies we'll re-send it on reconnect, but that's fine.
8029 let mut announcement_sigs_state = Some(AnnouncementSigsState::NotSent);
8030 let mut latest_inbound_scid_alias = None;
8031 let mut outbound_scid_alias = None;
8032 let mut channel_pending_event_emitted = None;
8033 let mut channel_ready_event_emitted = None;
8035 let mut user_id_high_opt: Option<u64> = None;
8036 let mut channel_keys_id: Option<[u8; 32]> = None;
8037 let mut temporary_channel_id: Option<ChannelId> = None;
8038 let mut holder_max_accepted_htlcs: Option<u16> = None;
8040 let mut blocked_monitor_updates = Some(Vec::new());
8042 let mut pending_outbound_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
8043 let mut holding_cell_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
8045 let mut is_batch_funding: Option<()> = None;
8047 let mut pending_outbound_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
8048 let mut holding_cell_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
8050 let mut malformed_htlcs: Option<Vec<(u64, u16, [u8; 32])>> = None;
8052 read_tlv_fields!(reader, {
8053 (0, announcement_sigs, option),
8054 (1, minimum_depth, option),
8055 (2, channel_type, option),
8056 (3, counterparty_selected_channel_reserve_satoshis, option),
8057 (4, holder_selected_channel_reserve_satoshis, option),
8058 (5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
8059 (6, holder_max_htlc_value_in_flight_msat, option),
8060 (7, shutdown_scriptpubkey, option),
8061 (8, blocked_monitor_updates, optional_vec),
8062 (9, target_closing_feerate_sats_per_kw, option),
8063 (11, monitor_pending_finalized_fulfills, optional_vec),
8064 (13, channel_creation_height, option),
8065 (15, preimages_opt, optional_vec),
8066 (17, announcement_sigs_state, option),
8067 (19, latest_inbound_scid_alias, option),
8068 (21, outbound_scid_alias, option),
8069 (23, channel_ready_event_emitted, option),
8070 (25, user_id_high_opt, option),
8071 (27, channel_keys_id, option),
8072 (28, holder_max_accepted_htlcs, option),
8073 (29, temporary_channel_id, option),
8074 (31, channel_pending_event_emitted, option),
8075 (35, pending_outbound_skimmed_fees_opt, optional_vec),
8076 (37, holding_cell_skimmed_fees_opt, optional_vec),
8077 (38, is_batch_funding, option),
8078 (39, pending_outbound_blinding_points_opt, optional_vec),
8079 (41, holding_cell_blinding_points_opt, optional_vec),
8080 (43, malformed_htlcs, optional_vec), // Added in 0.0.119
8083 let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
8084 let mut holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
8085 // If we've gotten to the funding stage of the channel, populate the signer with its
8086 // required channel parameters.
8087 if channel_state >= ChannelState::FundingNegotiated {
8088 holder_signer.provide_channel_parameters(&channel_parameters);
8090 (channel_keys_id, holder_signer)
8092 // `keys_data` can be `None` if we had corrupted data.
8093 let keys_data = keys_data.ok_or(DecodeError::InvalidValue)?;
8094 let holder_signer = signer_provider.read_chan_signer(&keys_data)?;
8095 (holder_signer.channel_keys_id(), holder_signer)
8098 if let Some(preimages) = preimages_opt {
8099 let mut iter = preimages.into_iter();
8100 for htlc in pending_outbound_htlcs.iter_mut() {
8102 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(None)) => {
8103 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
8105 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(None)) => {
8106 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
8111 // We expect all preimages to be consumed above
8112 if iter.next().is_some() {
8113 return Err(DecodeError::InvalidValue);
8117 let chan_features = channel_type.as_ref().unwrap();
8118 if !chan_features.is_subset(our_supported_features) {
8119 // If the channel was written by a new version and negotiated with features we don't
8120 // understand yet, refuse to read it.
8121 return Err(DecodeError::UnknownRequiredFeature);
8124 // ChannelTransactionParameters may have had an empty features set upon deserialization.
8125 // To account for that, we're proactively setting/overriding the field here.
8126 channel_parameters.channel_type_features = chan_features.clone();
8128 let mut secp_ctx = Secp256k1::new();
8129 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
8131 // `user_id` used to be a single u64 value. In order to remain backwards
8132 // compatible with versions prior to 0.0.113, the u128 is serialized as two
8133 // separate u64 values.
8134 let user_id = user_id_low as u128 + ((user_id_high_opt.unwrap_or(0) as u128) << 64);
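// Round-trip sketch for the two-u64 `user_id` encoding reassembled above
// (illustrative helpers, not in the original source):
fn split_user_id(user_id: u128) -> (u64, u64) { (user_id as u64, (user_id >> 64) as u64) }
fn join_user_id(low: u64, high: u64) -> u128 { low as u128 | ((high as u128) << 64) }
debug_assert_eq!(join_user_id(user_id_low, user_id_high_opt.unwrap_or(0)), user_id);
debug_assert_eq!(split_user_id(user_id), (user_id_low, user_id_high_opt.unwrap_or(0)));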
8136 let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);
8138 if let Some(skimmed_fees) = pending_outbound_skimmed_fees_opt {
8139 let mut iter = skimmed_fees.into_iter();
8140 for htlc in pending_outbound_htlcs.iter_mut() {
8141 htlc.skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
8143 // We expect all skimmed fees to be consumed above
8144 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8146 if let Some(skimmed_fees) = holding_cell_skimmed_fees_opt {
8147 let mut iter = skimmed_fees.into_iter();
8148 for htlc in holding_cell_htlc_updates.iter_mut() {
8149 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut skimmed_fee_msat, .. } = htlc {
8150 *skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
8153 // We expect all skimmed fees to be consumed above
8154 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8156 if let Some(blinding_pts) = pending_outbound_blinding_points_opt {
8157 let mut iter = blinding_pts.into_iter();
8158 for htlc in pending_outbound_htlcs.iter_mut() {
8159 htlc.blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
8161 // We expect all blinding points to be consumed above
8162 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8164 if let Some(blinding_pts) = holding_cell_blinding_points_opt {
8165 let mut iter = blinding_pts.into_iter();
8166 for htlc in holding_cell_htlc_updates.iter_mut() {
8167 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut blinding_point, .. } = htlc {
8168 *blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
8171 // We expect all blinding points to be consumed above
8172 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
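// The invariant each of the rehydration loops above enforces, as a tiny
// assumed helper: a TLV side-vector must pair one-to-one with the in-memory
// list it annotates, with no leftovers on either side.
fn zip_exact<A, B>(main: Vec<A>, side: Vec<B>) -> Result<Vec<(A, B)>, DecodeError> {
	if main.len() != side.len() { return Err(DecodeError::InvalidValue); }
	Ok(main.into_iter().zip(side.into_iter()).collect())
}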
8175 if let Some(malformed_htlcs) = malformed_htlcs {
8176 for (malformed_htlc_id, failure_code, sha256_of_onion) in malformed_htlcs {
8177 let htlc_idx = holding_cell_htlc_updates.iter().position(|htlc| {
8178 if let HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet } = htlc {
8179 let matches = *htlc_id == malformed_htlc_id;
8180 if matches { debug_assert!(err_packet.data.is_empty()) }
8183 }).ok_or(DecodeError::InvalidValue)?;
8184 let malformed_htlc = HTLCUpdateAwaitingACK::FailMalformedHTLC {
8185 htlc_id: malformed_htlc_id, failure_code, sha256_of_onion
8187 let _ = core::mem::replace(&mut holding_cell_htlc_updates[htlc_idx], malformed_htlc);
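// The upgrade step above in miniature (assumed helper): find the dummy
// `FailHTLC` placeholder by id, then swap the full malformed variant into the
// same holding-cell slot so ordering is preserved.
fn upgrade_in_place<T>(slots: &mut [(u64, T)], id: u64, upgraded: T) -> Option<T> {
	let idx = slots.iter().position(|(slot_id, _)| *slot_id == id)?;
	Some(core::mem::replace(&mut slots[idx].1, upgraded))
}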
8192 context: ChannelContext {
8195 config: config.unwrap(),
8199 // Note that we don't care about serializing handshake limits as we only ever serialize
8200 // channel data after the handshake has completed.
8201 inbound_handshake_limits_override: None,
8204 temporary_channel_id,
8206 announcement_sigs_state: announcement_sigs_state.unwrap(),
8208 channel_value_satoshis,
8210 latest_monitor_update_id,
8212 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
8213 shutdown_scriptpubkey,
8216 cur_holder_commitment_transaction_number,
8217 cur_counterparty_commitment_transaction_number,
8220 holder_max_accepted_htlcs,
8221 pending_inbound_htlcs,
8222 pending_outbound_htlcs,
8223 holding_cell_htlc_updates,
8227 monitor_pending_channel_ready,
8228 monitor_pending_revoke_and_ack,
8229 monitor_pending_commitment_signed,
8230 monitor_pending_forwards,
8231 monitor_pending_failures,
8232 monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
8234 signer_pending_commitment_update: false,
8235 signer_pending_funding: false,
8238 holding_cell_update_fee,
8239 next_holder_htlc_id,
8240 next_counterparty_htlc_id,
8241 update_time_counter,
8244 #[cfg(debug_assertions)]
8245 holder_max_commitment_tx_output: Mutex::new((0, 0)),
8246 #[cfg(debug_assertions)]
8247 counterparty_max_commitment_tx_output: Mutex::new((0, 0)),
8249 last_sent_closing_fee: None,
8250 pending_counterparty_closing_signed: None,
8251 expecting_peer_commitment_signed: false,
8252 closing_fee_limits: None,
8253 target_closing_feerate_sats_per_kw,
8255 funding_tx_confirmed_in,
8256 funding_tx_confirmation_height,
8258 channel_creation_height: channel_creation_height.unwrap(),
8260 counterparty_dust_limit_satoshis,
8261 holder_dust_limit_satoshis,
8262 counterparty_max_htlc_value_in_flight_msat,
8263 holder_max_htlc_value_in_flight_msat: holder_max_htlc_value_in_flight_msat.unwrap(),
8264 counterparty_selected_channel_reserve_satoshis,
8265 holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis.unwrap(),
8266 counterparty_htlc_minimum_msat,
8267 holder_htlc_minimum_msat,
8268 counterparty_max_accepted_htlcs,
8271 counterparty_forwarding_info,
8273 channel_transaction_parameters: channel_parameters,
8274 funding_transaction,
8277 counterparty_cur_commitment_point,
8278 counterparty_prev_commitment_point,
8279 counterparty_node_id,
8281 counterparty_shutdown_scriptpubkey,
8285 channel_update_status,
8286 closing_signed_in_flight: false,
8290 #[cfg(any(test, fuzzing))]
8291 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
8292 #[cfg(any(test, fuzzing))]
8293 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
8295 workaround_lnd_bug_4006: None,
8296 sent_message_awaiting_response: None,
8298 latest_inbound_scid_alias,
8299 // Later in the ChannelManager deserialization phase we scan for channels and assign scid aliases if they're missing
8300 outbound_scid_alias: outbound_scid_alias.unwrap_or(0),
8302 channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true),
8303 channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true),
8305 #[cfg(any(test, fuzzing))]
8306 historical_inbound_htlc_fulfills,
8308 channel_type: channel_type.unwrap(),
8311 blocked_monitor_updates: blocked_monitor_updates.unwrap(),
8320 use bitcoin::blockdata::constants::ChainHash;
8321 use bitcoin::blockdata::script::{ScriptBuf, Builder};
8322 use bitcoin::blockdata::transaction::{Transaction, TxOut};
8323 use bitcoin::blockdata::opcodes;
8324 use bitcoin::network::constants::Network;
8325 use crate::ln::onion_utils::INVALID_ONION_BLINDING;
8326 use crate::ln::{PaymentHash, PaymentPreimage};
8327 use crate::ln::channel_keys::{RevocationKey, RevocationBasepoint};
8328 use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
8329 use crate::ln::channel::InitFeatures;
8330 use crate::ln::channel::{AwaitingChannelReadyFlags, Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, HTLCUpdateAwaitingACK, commit_tx_fee_msat};
8331 use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
8332 use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
8333 use crate::ln::msgs;
8334 use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
8335 use crate::ln::script::ShutdownScript;
8336 use crate::ln::chan_utils::{self, htlc_success_tx_weight, htlc_timeout_tx_weight};
8337 use crate::chain::BestBlock;
8338 use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
8339 use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
8340 use crate::chain::transaction::OutPoint;
8341 use crate::routing::router::{Path, RouteHop};
8342 use crate::util::config::UserConfig;
8343 use crate::util::errors::APIError;
8344 use crate::util::ser::{ReadableArgs, Writeable};
8345 use crate::util::test_utils;
8346 use crate::util::test_utils::{OnGetShutdownScriptpubkey, TestKeysInterface};
8347 use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
8348 use bitcoin::secp256k1::ffi::Signature as FFISignature;
8349 use bitcoin::secp256k1::{SecretKey,PublicKey};
8350 use bitcoin::hashes::sha256::Hash as Sha256;
8351 use bitcoin::hashes::Hash;
8352 use bitcoin::hashes::hex::FromHex;
8353 use bitcoin::hash_types::WPubkeyHash;
8354 use bitcoin::blockdata::locktime::absolute::LockTime;
8355 use bitcoin::address::{WitnessProgram, WitnessVersion};
8356 use crate::prelude::*;
8358 struct TestFeeEstimator {
8361 impl FeeEstimator for TestFeeEstimator {
8362 fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 {
8368 fn test_max_funding_satoshis_no_wumbo() {
8369 assert_eq!(TOTAL_BITCOIN_SUPPLY_SATOSHIS, 21_000_000 * 100_000_000);
8370 assert!(MAX_FUNDING_SATOSHIS_NO_WUMBO <= TOTAL_BITCOIN_SUPPLY_SATOSHIS,
8371 "MAX_FUNDING_SATOSHIS_NO_WUMBO is greater than all satoshis in existence");
8375 signer: InMemorySigner,
8378 impl EntropySource for Keys {
8379 fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }
8382 impl SignerProvider for Keys {
8383 type EcdsaSigner = InMemorySigner;
8385 type TaprootSigner = InMemorySigner;
8387 fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
8388 self.signer.channel_keys_id()
8391 fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::EcdsaSigner {
8395 fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::EcdsaSigner, DecodeError> { panic!(); }
8397 fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()> {
8398 let secp_ctx = Secp256k1::signing_only();
8399 let channel_monitor_claim_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
8400 let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
8401 Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(channel_monitor_claim_key_hash).into_script())
8404 fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
8405 let secp_ctx = Secp256k1::signing_only();
8406 let channel_close_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
8407 Ok(ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key)))
8411 #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
8412 fn public_from_secret_hex(secp_ctx: &Secp256k1<bitcoin::secp256k1::All>, hex: &str) -> PublicKey {
8413 PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&<Vec<u8>>::from_hex(hex).unwrap()[..]).unwrap())
8417 fn upfront_shutdown_script_incompatibility() {
8418 let features = channelmanager::provided_init_features(&UserConfig::default()).clear_shutdown_anysegwit();
8419 let non_v0_segwit_shutdown_script = ShutdownScript::new_witness_program(
8420 &WitnessProgram::new(WitnessVersion::V16, &[0, 40]).unwrap(),
8423 let seed = [42; 32];
8424 let network = Network::Testnet;
8425 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8426 keys_provider.expect(OnGetShutdownScriptpubkey {
8427 returns: non_v0_segwit_shutdown_script.clone(),
8430 let secp_ctx = Secp256k1::new();
8431 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8432 let config = UserConfig::default();
8433 match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42, None) {
8434 Err(APIError::IncompatibleShutdownScript { script }) => {
8435 assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
8437 Err(e) => panic!("Unexpected error: {:?}", e),
8438 Ok(_) => panic!("Expected error"),
8442 // Check that, during channel creation, we use the same feerate in the open channel message
8443 // as we do in the Channel object creation itself.
8445 fn test_open_channel_msg_fee() {
8446 let original_fee = 253;
8447 let mut fee_est = TestFeeEstimator{fee_est: original_fee };
8448 let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
8449 let secp_ctx = Secp256k1::new();
8450 let seed = [42; 32];
8451 let network = Network::Testnet;
8452 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8454 let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8455 let config = UserConfig::default();
8456 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8458 // Now change the fee so we can check that the fee in the open_channel message is the
8459 // same as the old fee.
8460 fee_est.fee_est = 500;
8461 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8462 assert_eq!(open_channel_msg.feerate_per_kw, original_fee);
	#[test]
	fn test_holder_vs_counterparty_dust_limit() {
		// Test that when calculating the local and remote commitment transaction fees, the correct
		// dust limits are used.
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{ fee_est: 15000 });
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
		let logger = test_utils::TestLogger::new();
		let best_block = BestBlock::from_network(network);

		// Go through the flow of opening a channel between two nodes, making sure
		// they have different dust limits.

		// Create Node A's channel pointing to Node B's pubkey
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

		// Create Node B's channel by receiving Node A's open_channel message
		// Make sure A's dust limit is as we expect.
		let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();

		// Node B --> Node A: accept channel, explicitly setting B's dust limit.
		let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
		accept_channel_msg.dust_limit_satoshis = 546;
		node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
		node_a_chan.context.holder_dust_limit_satoshis = 1560;

		// Node A --> Node B: funding created
		let output_script = node_a_chan.context.get_funding_redeemscript();
		let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
			value: 10000000, script_pubkey: output_script.clone(),
		}]};
		let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
		let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
		let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();

		// Node B --> Node A: funding signed
		let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
		let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };

		// Put some inbound and outbound HTLCs in A's channel.
		let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
		node_a_chan.context.pending_inbound_htlcs.push(InboundHTLCOutput {
			htlc_id: 0,
			amount_msat: htlc_amount_msat,
			payment_hash: PaymentHash(Sha256::hash(&[42; 32]).to_byte_array()),
			cltv_expiry: 300000000,
			state: InboundHTLCState::Committed,
		});
		node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
			htlc_id: 1,
			amount_msat: htlc_amount_msat, // put an amount below A's dust limit but above B's.
			payment_hash: PaymentHash(Sha256::hash(&[43; 32]).to_byte_array()),
			cltv_expiry: 200000000,
			state: OutboundHTLCState::Committed,
			source: HTLCSource::OutboundRoute {
				path: Path { hops: Vec::new(), blinded_tail: None },
				session_priv: SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
				first_hop_htlc_msat: 548,
				payment_id: PaymentId([42; 32]),
			},
			skimmed_fee_msat: None,
			blinding_point: None,
		});

		// Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass
		// the dust limit check.
		let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
		let local_commit_tx_fee = node_a_chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
		let local_commit_fee_0_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.get_channel_type());
		assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs);
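		// Worked numbers (assuming the pre-anchors BOLT 3 second-stage weights, 663 for timeout
		// and 703 for success transactions): at 15000 sat/kW, an HTLC on A's commitment is dust
		// below 1560 + 15000 * 663 / 1000 = 11505 sat (offered) or 1560 + 15000 * 703 / 1000 =
		// 12105 sat (received), so the 11092 sat HTLCs are dust for A. Against B's 546 sat
		// limit the thresholds are 10491 and 11091 sat, leaving the same HTLCs non-dust for B.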
		// Finally, make sure that when Node A calculates the remote's commitment transaction fees, all
		// of the HTLCs are seen to be above the dust limit.
		node_a_chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
		let remote_commit_fee_3_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.get_channel_type());
		let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
		let remote_commit_tx_fee = node_a_chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
		assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs);
	}
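	// A minimal sketch (not an LDK API) of the dust-threshold arithmetic the two dust-limit
	// tests around here exercise; `second_stage_weight` is a stand-in for the result of
	// `htlc_timeout_tx_weight`/`htlc_success_tx_weight`, chosen by the HTLC's direction:
	#[allow(dead_code)]
	fn dust_threshold_msat(feerate_per_kw: u64, second_stage_weight: u64, dust_limit_sat: u64) -> u64 {
		// Fee to claim the HTLC via its second-stage transaction, plus the dust limit,
		// expressed in millisatoshis.
		(feerate_per_kw * second_stage_weight / 1000 + dust_limit_sat) * 1000
	}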
	#[test]
	fn test_timeout_vs_success_htlc_dust_limit() {
		// Make sure that when `next_remote_commit_tx_fee_msat` and `next_local_commit_tx_fee_msat`
		// calculate the real dust limits for HTLCs (i.e. the dust limit given by the counterparty
		// *plus* the fees paid for the HTLC) they don't swap `HTLC_SUCCESS_TX_WEIGHT` for
		// `HTLC_TIMEOUT_TX_WEIGHT`, and vice versa.
		let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator{ fee_est: 253 });
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

		let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

		let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.get_channel_type());
		let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.get_channel_type());

		// If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped: then this HTLC would be
		// counted as dust when it shouldn't be.
		let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000;
		let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
		let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
		assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
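		// With the pre-anchors weights, this offered HTLC sits 1 sat above the timeout-based
		// threshold (253 * 663 / 1000 = 167 sat of fee) while remaining below the success-based
		// one (253 * 703 / 1000 = 177 sat), so only the correct weight choice counts it as
		// non-dust here.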
		// If swapped: this HTLC would be counted as non-dust when it shouldn't be.
		let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000;
		let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered);
		let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
		assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);

		chan.context.channel_transaction_parameters.is_outbound_from_holder = false;

		// If swapped: this HTLC would be counted as non-dust when it shouldn't be.
		let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000;
		let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
		let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
		assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);

		// If swapped: this HTLC would be counted as dust when it shouldn't be.
		let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000;
		let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered);
		let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
		assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
	}
	#[test]
	fn channel_reestablish_no_updates() {
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{ fee_est: 15000 });
		let logger = test_utils::TestLogger::new();
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let best_block = BestBlock::from_network(network);
		let chain_hash = ChainHash::using_genesis_block(network);
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

		// Go through the flow of opening a channel between two nodes.

		// Create Node A's channel pointing to Node B's pubkey
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

		// Create Node B's channel by receiving Node A's open_channel message
		let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();

		// Node B --> Node A: accept channel
		let accept_channel_msg = node_b_chan.accept_inbound_channel();
		node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();

		// Node A --> Node B: funding created
		let output_script = node_a_chan.context.get_funding_redeemscript();
		let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
			value: 10000000, script_pubkey: output_script.clone(),
		}]};
		let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
		let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
		let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();

		// Node B --> Node A: funding signed
		let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
		let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };

		// Now disconnect the two nodes and check that the commitment point in
		// Node B's channel_reestablish message is sane.
		assert!(node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
		let msg = node_b_chan.get_channel_reestablish(&&logger);
		assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
		assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
		assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
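		// Immediately after funding, each side has signed only commitment number 0 and has
		// received no revocation secrets, so it expects commitment number 1 next and reports an
		// all-zero `your_last_per_commitment_secret` ("nothing revoked to us yet").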
		// Check that the commitment point in Node A's channel_reestablish message
		// is sane.
		assert!(node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
		let msg = node_a_chan.get_channel_reestablish(&&logger);
		assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
		assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
		assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
	}
	#[test]
	fn test_configured_holder_max_htlc_value_in_flight() {
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{ fee_est: 15000 });
		let logger = test_utils::TestLogger::new();
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
		let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());

		let mut config_2_percent = UserConfig::default();
		config_2_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 2;
		let mut config_99_percent = UserConfig::default();
		config_99_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 99;
		let mut config_0_percent = UserConfig::default();
		config_0_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 0;
		let mut config_101_percent = UserConfig::default();
		config_101_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 101;

		// Test that `OutboundV1Channel::new` creates a channel with the correct value for
		// `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
		// which is set to the lower bound + 1 (2%) of the `channel_value`.
		let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42, None).unwrap();
		let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000;
		assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64);
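		// For the 10_000_000 sat channel used here that is 10_000_000_000 msat * 0.02 =
		// 200_000_000 msat of permitted in-flight inbound HTLC value.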
		// Test with the upper bound - 1 of valid values (99%).
		let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42, None).unwrap();
		let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000;
		assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);

		let chan_1_open_channel_msg = chan_1.get_open_channel(ChainHash::using_genesis_block(network));

		// Test that `InboundV1Channel::new` creates a channel with the correct value for
		// `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
		// which is set to the lower bound + 1 (2%) of the `channel_value`.
		let chan_3 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
		let chan_3_value_msat = chan_3.context.channel_value_satoshis * 1000;
		assert_eq!(chan_3.context.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64);

		// Test with the upper bound - 1 of valid values (99%).
		let chan_4 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
		let chan_4_value_msat = chan_4.context.channel_value_satoshis * 1000;
		assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64);

		// Test that `OutboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
		// if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
		let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42, None).unwrap();
		let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000;
		assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64);

		// Test that `OutboundV1Channel::new` uses the upper bound of the configurable percentage values
		// (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
		// than 100.
		let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42, None).unwrap();
		let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000;
		assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);

		// Test that `InboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
		// if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
		let chan_7 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
		let chan_7_value_msat = chan_7.context.channel_value_satoshis * 1000;
		assert_eq!(chan_7.context.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64);

		// Test that `InboundV1Channel::new` uses the upper bound of the configurable percentage values
		// (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
		// than 100.
		let chan_8 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
		let chan_8_value_msat = chan_8.context.channel_value_satoshis * 1000;
		assert_eq!(chan_8.context.holder_max_htlc_value_in_flight_msat, chan_8_value_msat);
	}
	#[test]
	fn test_configured_holder_selected_channel_reserve_satoshis() {
		// Test that `OutboundV1Channel::new` and `InboundV1Channel::new` create a channel with the correct
		// channel reserves, when `their_channel_reserve_proportional_millionths` is configured.
		test_self_and_counterparty_channel_reserve(10_000_000, 0.02, 0.02);

		// Test with valid but unreasonably high channel reserves.
		// The requesting and accepting parties ask for 49%/49% and 60%/30% channel reserves.
		test_self_and_counterparty_channel_reserve(10_000_000, 0.49, 0.49);
		test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.30);

		// Test with a calculated channel reserve less than the lower bound,
		// i.e. `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
		test_self_and_counterparty_channel_reserve(100_000, 0.00002, 0.30);
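		// 100_000 sat * 0.00002 is only 2 sat, so this case relies on the helper's
		// `MIN_THEIR_CHAN_RESERVE_SATOSHIS` floor (see the `cmp::max` in the helper below)
		// rather than the proportional value.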
		// Test with invalid channel reserves, where the sum of both reserves is greater than or
		// equal to the channel value.
		test_self_and_counterparty_channel_reserve(10_000_000, 0.50, 0.50);
		test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.50);
	}
	fn test_self_and_counterparty_channel_reserve(channel_value_satoshis: u64, outbound_selected_channel_reserve_perc: f64, inbound_selected_channel_reserve_perc: f64) {
		let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15_000 });
		let logger = test_utils::TestLogger::new();
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
		let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());

		let mut outbound_node_config = UserConfig::default();
		outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
		let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42, None).unwrap();

		let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
		assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);
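		// e.g. for a 10_000_000 sat channel with 2% selected, this is
		// max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, 10_000_000 * 0.02) = 200_000 sat.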
		let chan_open_channel_msg = chan.get_open_channel(ChainHash::using_genesis_block(network));
		let mut inbound_node_config = UserConfig::default();
		inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;

		if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 {
			let chan_inbound_node = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false).unwrap();

			let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64);

			assert_eq!(chan_inbound_node.context.holder_selected_channel_reserve_satoshis, expected_inbound_selected_chan_reserve);
			assert_eq!(chan_inbound_node.context.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve);
		} else {
			// Channel negotiation fails: the combined reserves leave no spendable channel value.
			let result = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false);
			assert!(result.is_err());
		}
	}
	#[test]
	fn channel_update() {
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{ fee_est: 15000 });
		let logger = test_utils::TestLogger::new();
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let best_block = BestBlock::from_network(network);
		let chain_hash = ChainHash::using_genesis_block(network);
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

		// Create Node A's channel pointing to Node B's pubkey
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

		// Create Node B's channel by receiving Node A's open_channel message
		// Make sure A's dust limit is as we expect.
		let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();

		// Node B --> Node A: accept channel, explicitly setting B's dust limit.
		let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
		accept_channel_msg.dust_limit_satoshis = 546;
		node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
		node_a_chan.context.holder_dust_limit_satoshis = 1560;

		// Node A --> Node B: funding created
		let output_script = node_a_chan.context.get_funding_redeemscript();
		let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
			value: 10000000, script_pubkey: output_script.clone(),
		}]};
		let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
		let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
		let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();

		// Node B --> Node A: funding signed
		let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
		let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };

		// Make sure that receiving a channel update will update the Channel as expected.
		let update = ChannelUpdate {
			contents: UnsignedChannelUpdate {
				chain_hash,
				short_channel_id: 0,
				timestamp: 0,
				flags: 0,
				cltv_expiry_delta: 100,
				htlc_minimum_msat: 5,
				htlc_maximum_msat: MAX_VALUE_MSAT,
				fee_base_msat: 110,
				fee_proportional_millionths: 11,
				excess_data: Vec::new(),
			},
			signature: Signature::from(unsafe { FFISignature::new() })
		};
		assert!(node_a_chan.channel_update(&update).unwrap());
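		// `channel_update` returns `Ok(true)` when the message changed our stored counterparty
		// forwarding info, as it did here; replaying the identical update at the end of this
		// test returns `Ok(false)`.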
		// The counterparty can send an update with a higher minimum HTLC, but that shouldn't
		// change our official htlc_minimum_msat.
		assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1);
		match node_a_chan.context.counterparty_forwarding_info() {
			Some(info) => {
				assert_eq!(info.cltv_expiry_delta, 100);
				assert_eq!(info.fee_base_msat, 110);
				assert_eq!(info.fee_proportional_millionths, 11);
			},
			None => panic!("expected counterparty forwarding info to be Some")
		}

		// Applying the same update a second time changes nothing.
		assert!(!node_a_chan.channel_update(&update).unwrap());
	}
	#[test]
	fn blinding_point_skimmed_fee_malformed_ser() {
		// Ensure that channel blinding points, skimmed fees, and malformed HTLCs are (de)serialized
		// properly.
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{ fee_est: 15000 });
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let features = channelmanager::provided_init_features(&config);
		let outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None).unwrap();
		let mut chan = Channel { context: outbound_chan.context };

		let dummy_htlc_source = HTLCSource::OutboundRoute {
			path: Path {
				hops: vec![RouteHop {
					pubkey: test_utils::pubkey(2), channel_features: ChannelFeatures::empty(),
					node_features: NodeFeatures::empty(), short_channel_id: 0, fee_msat: 0,
					cltv_expiry_delta: 0, maybe_announced_channel: false,
				}],
				blinded_tail: None
			},
			session_priv: test_utils::privkey(42),
			first_hop_htlc_msat: 0,
			payment_id: PaymentId([42; 32]),
		};
		let dummy_outbound_output = OutboundHTLCOutput {
			htlc_id: 0,
			amount_msat: 0,
			payment_hash: PaymentHash([43; 32]),
			cltv_expiry: 0,
			state: OutboundHTLCState::Committed,
			source: dummy_htlc_source.clone(),
			skimmed_fee_msat: None,
			blinding_point: None,
		};
		let mut pending_outbound_htlcs = vec![dummy_outbound_output.clone(); 10];
		for (idx, htlc) in pending_outbound_htlcs.iter_mut().enumerate() {
			if idx % 2 == 0 {
				htlc.blinding_point = Some(test_utils::pubkey(42 + idx as u8));
			}
			if idx % 3 == 0 {
				htlc.skimmed_fee_msat = Some(1);
			}
		}
		chan.context.pending_outbound_htlcs = pending_outbound_htlcs.clone();
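		// Staggering by idx % 2 and idx % 3 yields HTLCs with only a blinding point, only a
		// skimmed fee, both, and neither, so each optional field is round-tripped in every
		// present/absent combination.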
		let dummy_holding_cell_add_htlc = HTLCUpdateAwaitingACK::AddHTLC {
			amount_msat: 0,
			cltv_expiry: 0,
			payment_hash: PaymentHash([43; 32]),
			source: dummy_htlc_source.clone(),
			onion_routing_packet: msgs::OnionPacket {
				version: 0,
				public_key: Ok(test_utils::pubkey(1)),
				hop_data: [0; 20*65],
				hmac: [0; 32],
			},
			skimmed_fee_msat: None,
			blinding_point: None,
		};
		let dummy_holding_cell_claim_htlc = HTLCUpdateAwaitingACK::ClaimHTLC {
			payment_preimage: PaymentPreimage([42; 32]),
			htlc_id: 0,
		};
		let dummy_holding_cell_failed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailHTLC {
			htlc_id, err_packet: msgs::OnionErrorPacket { data: vec![42] }
		};
		let dummy_holding_cell_malformed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailMalformedHTLC {
			htlc_id, failure_code: INVALID_ONION_BLINDING, sha256_of_onion: [0; 32],
		};
		let mut holding_cell_htlc_updates = Vec::with_capacity(12);
		for i in 0..12 {
			if i % 5 == 0 {
				holding_cell_htlc_updates.push(dummy_holding_cell_add_htlc.clone());
			} else if i % 5 == 1 {
				holding_cell_htlc_updates.push(dummy_holding_cell_claim_htlc.clone());
			} else if i % 5 == 2 {
				let mut dummy_add = dummy_holding_cell_add_htlc.clone();
				if let HTLCUpdateAwaitingACK::AddHTLC {
					ref mut blinding_point, ref mut skimmed_fee_msat, ..
				} = &mut dummy_add {
					*blinding_point = Some(test_utils::pubkey(42 + i));
					*skimmed_fee_msat = Some(42);
				} else { panic!() }
				holding_cell_htlc_updates.push(dummy_add);
			} else if i % 5 == 3 {
				holding_cell_htlc_updates.push(dummy_holding_cell_malformed_htlc(i as u64));
			} else {
				holding_cell_htlc_updates.push(dummy_holding_cell_failed_htlc(i as u64));
			}
		}
		chan.context.holding_cell_htlc_updates = holding_cell_htlc_updates.clone();

		// Encode and decode the channel and ensure that the HTLCs within are the same.
		let encoded_chan = chan.encode();
		let mut s = crate::io::Cursor::new(&encoded_chan);
		let mut reader = crate::util::ser::FixedLengthReader::new(&mut s, encoded_chan.len() as u64);
		let features = channelmanager::provided_channel_type_features(&config);
		let decoded_chan = Channel::read(&mut reader, (&&keys_provider, &&keys_provider, 0, &features)).unwrap();
		assert_eq!(decoded_chan.context.pending_outbound_htlcs, pending_outbound_htlcs);
		assert_eq!(decoded_chan.context.holding_cell_htlc_updates, holding_cell_htlc_updates);
	}
	#[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
	#[test]
	fn outbound_commitment_test() {
		use bitcoin::sighash;
		use bitcoin::consensus::encode::serialize;
		use bitcoin::sighash::EcdsaSighashType;
		use bitcoin::hashes::hex::FromHex;
		use bitcoin::hash_types::Txid;
		use bitcoin::secp256k1::Message;
		use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, ecdsa::EcdsaChannelSigner};
		use crate::ln::PaymentPreimage;
		use crate::ln::channel::{HTLCOutputInCommitment, TxCreationKeys};
		use crate::ln::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint};
		use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
		use crate::util::logger::Logger;
		use crate::sync::Arc;
		use core::str::FromStr;
		use hex::DisplayHex;

		// Test vectors from BOLT 3 Appendices C and F (anchors):
		let feeest = TestFeeEstimator{ fee_est: 15000 };
		let logger: Arc<dyn Logger> = Arc::new(test_utils::TestLogger::new());
		let secp_ctx = Secp256k1::new();

		let mut signer = InMemorySigner::new(
			&secp_ctx,
			SecretKey::from_slice(&<Vec<u8>>::from_hex("30ff4956bbdd3222d44cc5e8a1261dab1e07957bdac5ae88fe3261ef321f3749").unwrap()[..]).unwrap(),
			SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
			SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
			SecretKey::from_slice(&<Vec<u8>>::from_hex("3333333333333333333333333333333333333333333333333333333333333333").unwrap()[..]).unwrap(),
			SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),

			// These aren't set in the test vectors:
			[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff],
			10_000_000,
			[0; 32],
			[0; 32],
		);

		assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..],
			<Vec<u8>>::from_hex("023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb").unwrap()[..]);
		let keys_provider = Keys { signer: signer.clone() };

		let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let mut config = UserConfig::default();
		config.channel_handshake_config.announced_channel = false;
		let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42, None).unwrap(); // Nothing uses their network key in this test
		chan.context.holder_dust_limit_satoshis = 546;
		chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Normally filled in during accept_channel
		let funding_info = OutPoint{ txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };

		let counterparty_pubkeys = ChannelPublicKeys {
			funding_pubkey: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
			revocation_basepoint: RevocationBasepoint::from(PublicKey::from_slice(&<Vec<u8>>::from_hex("02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27").unwrap()[..]).unwrap()),
			payment_point: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"),
			delayed_payment_basepoint: DelayedPaymentBasepoint::from(public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13")),
			htlc_basepoint: HtlcBasepoint::from(public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"))
		};
		chan.context.channel_transaction_parameters.counterparty_parameters = Some(
			CounterpartyChannelTransactionParameters {
				pubkeys: counterparty_pubkeys.clone(),
				selected_contest_delay: 144
			});
		chan.context.channel_transaction_parameters.funding_outpoint = Some(funding_info);
		signer.provide_channel_parameters(&chan.context.channel_transaction_parameters);

		assert_eq!(counterparty_pubkeys.payment_point.serialize()[..],
			<Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);

		assert_eq!(counterparty_pubkeys.funding_pubkey.serialize()[..],
			<Vec<u8>>::from_hex("030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c1").unwrap()[..]);

		assert_eq!(counterparty_pubkeys.htlc_basepoint.to_public_key().serialize()[..],
			<Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);

		// We can't just use build_holder_transaction_keys here as the per_commitment_secret is not
		// derived from a commitment_seed, so instead we copy it here and call
		// build_commitment_transaction.
		let delayed_payment_base = &chan.context.holder_signer.as_ref().pubkeys().delayed_payment_basepoint;
		let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
		let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
		let htlc_basepoint = &chan.context.holder_signer.as_ref().pubkeys().htlc_basepoint;
		let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint);
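		// `TxCreationKeys::derive_new` mixes our delayed-payment and HTLC basepoints (and the
		// counterparty's revocation and HTLC basepoints) with this fixed per-commitment point,
		// pinning every derived key to the constant per_commitment_secret above, as the BOLT 3
		// vectors require.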
		macro_rules! test_commitment {
			( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
				chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::only_static_remote_key();
				test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::only_static_remote_key(), $($remain)*);
			};
		}

		macro_rules! test_commitment_with_anchors {
			( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
				chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
				test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), $($remain)*);
			};
		}

		macro_rules! test_commitment_common {
			( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $opt_anchors: expr, {
				$( { $htlc_idx: expr, $counterparty_htlc_sig_hex: expr, $htlc_sig_hex: expr, $htlc_tx_hex: expr } ), *
			} ) => { {
				let (commitment_tx, htlcs): (_, Vec<HTLCOutputInCommitment>) = {
					let mut commitment_stats = chan.context.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, &logger);

					let htlcs = commitment_stats.htlcs_included.drain(..)
						.filter_map(|(htlc, _)| if htlc.transaction_output_index.is_some() { Some(htlc) } else { None })
						.collect();
					(commitment_stats.tx, htlcs)
				};
				let trusted_tx = commitment_tx.trust();
				let unsigned_tx = trusted_tx.built_transaction();
				let redeemscript = chan.context.get_funding_redeemscript();
				let counterparty_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_sig_hex).unwrap()[..]).unwrap();
				let sighash = unsigned_tx.get_sighash_all(&redeemscript, chan.context.channel_value_satoshis);
				log_trace!(logger, "unsigned_tx = {}", serialize(&unsigned_tx.transaction).as_hex());
				assert!(secp_ctx.verify_ecdsa(&sighash, &counterparty_signature, chan.context.counterparty_funding_pubkey()).is_ok(), "verify counterparty commitment sig");

				let mut per_htlc: Vec<(HTLCOutputInCommitment, Option<Signature>)> = Vec::new();
				per_htlc.clear(); // Don't warn about excess mut for no-HTLC calls
				let mut counterparty_htlc_sigs = Vec::new();
				counterparty_htlc_sigs.clear(); // Don't warn about excess mut for no-HTLC calls
				$({
					let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
					per_htlc.push((htlcs[$htlc_idx].clone(), Some(remote_signature)));
					counterparty_htlc_sigs.push(remote_signature);
				})*
				assert_eq!(htlcs.len(), per_htlc.len());

				let holder_commitment_tx = HolderCommitmentTransaction::new(
					commitment_tx.clone(),
					counterparty_signature,
					counterparty_htlc_sigs,
					&chan.context.holder_signer.as_ref().pubkeys().funding_pubkey,
					chan.context.counterparty_funding_pubkey()
				);
				let holder_sig = signer.sign_holder_commitment(&holder_commitment_tx, &secp_ctx).unwrap();
				assert_eq!(Signature::from_der(&<Vec<u8>>::from_hex($sig_hex).unwrap()[..]).unwrap(), holder_sig, "holder_sig");

				let funding_redeemscript = chan.context.get_funding_redeemscript();
				let tx = holder_commitment_tx.add_holder_sig(&funding_redeemscript, holder_sig);
				assert_eq!(serialize(&tx)[..], <Vec<u8>>::from_hex($tx_hex).unwrap()[..], "tx");

				// ((htlc, counterparty_sig), (index, holder_sig))
				let mut htlc_counterparty_sig_iter = holder_commitment_tx.counterparty_htlc_sigs.iter();

				$({
					log_trace!(logger, "verifying htlc {}", $htlc_idx);
					let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();

					let ref htlc = htlcs[$htlc_idx];
					let mut htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw,
						chan.context.get_counterparty_selected_contest_delay().unwrap(),
						&htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
					let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
					let htlc_sighashtype = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
					let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
					assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key.to_public_key()).is_ok(), "verify counterparty htlc sig");

					let mut preimage: Option<PaymentPreimage> = None;
					if !htlc.offered {
						for i in 0..5 {
							let out = PaymentHash(Sha256::hash(&[i; 32]).to_byte_array());
							if out == htlc.payment_hash {
								preimage = Some(PaymentPreimage([i; 32]));
							}
						}

						assert!(preimage.is_some());
					}

					let htlc_counterparty_sig = htlc_counterparty_sig_iter.next().unwrap();
					let htlc_holder_sig = signer.sign_holder_htlc_transaction(&htlc_tx, 0, &HTLCDescriptor {
						channel_derivation_parameters: ChannelDerivationParameters {
							value_satoshis: chan.context.channel_value_satoshis,
							keys_id: chan.context.channel_keys_id,
							transaction_parameters: chan.context.channel_transaction_parameters.clone(),
						},
						commitment_txid: trusted_tx.txid(),
						per_commitment_number: trusted_tx.commitment_number(),
						per_commitment_point: trusted_tx.per_commitment_point(),
						feerate_per_kw: trusted_tx.feerate_per_kw(),
						htlc: htlc.clone(),
						preimage: preimage.clone(),
						counterparty_sig: *htlc_counterparty_sig,
					}, &secp_ctx).unwrap();
					let num_anchors = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { 2 } else { 0 };
					assert_eq!(htlc.transaction_output_index, Some($htlc_idx + num_anchors), "output index");

					let signature = Signature::from_der(&<Vec<u8>>::from_hex($htlc_sig_hex).unwrap()[..]).unwrap();
					assert_eq!(signature, htlc_holder_sig, "htlc sig");
					let trusted_tx = holder_commitment_tx.trust();
					htlc_tx.input[0].witness = trusted_tx.build_htlc_input_witness($htlc_idx, htlc_counterparty_sig, &htlc_holder_sig, &preimage);
					log_trace!(logger, "htlc_tx = {}", serialize(&htlc_tx).as_hex());
					assert_eq!(serialize(&htlc_tx)[..], <Vec<u8>>::from_hex($htlc_tx_hex).unwrap()[..], "htlc tx");
				})*
				assert!(htlc_counterparty_sig_iter.next().is_none());
			} }
		}
		// anchors: simple commitment tx with no HTLCs and single anchor
		test_commitment_with_anchors!("30440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a8658",
			"3044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778",
			"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f10529800000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778014730440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a865801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});

		// simple commitment tx with no HTLCs
		chan.context.value_to_self_msat = 7000000000;

		test_commitment!("3045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b0",
			"30440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae055647142",
			"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48454a56a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae05564714201483045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});

		// anchors: simple commitment tx with no HTLCs
		test_commitment_with_anchors!("3045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f3",
			"30450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f7",
			"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a508b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f701483045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
		chan.context.pending_inbound_htlcs.push({
			let mut out = InboundHTLCOutput{
				htlc_id: 0,
				amount_msat: 1000000,
				cltv_expiry: 500,
				payment_hash: PaymentHash([0; 32]),
				state: InboundHTLCState::Committed,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap()).to_byte_array();
			out
		});
		chan.context.pending_inbound_htlcs.push({
			let mut out = InboundHTLCOutput{
				htlc_id: 1,
				amount_msat: 2000000,
				cltv_expiry: 501,
				payment_hash: PaymentHash([0; 32]),
				state: InboundHTLCState::Committed,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
			out
		});
		chan.context.pending_outbound_htlcs.push({
			let mut out = OutboundHTLCOutput{
				htlc_id: 2,
				amount_msat: 2000000,
				cltv_expiry: 502,
				payment_hash: PaymentHash([0; 32]),
				state: OutboundHTLCState::Committed,
				source: HTLCSource::dummy(),
				skimmed_fee_msat: None,
				blinding_point: None,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).to_byte_array();
			out
		});
		chan.context.pending_outbound_htlcs.push({
			let mut out = OutboundHTLCOutput{
				htlc_id: 3,
				amount_msat: 3000000,
				cltv_expiry: 503,
				payment_hash: PaymentHash([0; 32]),
				state: OutboundHTLCState::Committed,
				source: HTLCSource::dummy(),
				skimmed_fee_msat: None,
				blinding_point: None,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).to_byte_array();
			out
		});
		chan.context.pending_inbound_htlcs.push({
			let mut out = InboundHTLCOutput{
				htlc_id: 4,
				amount_msat: 4000000,
				cltv_expiry: 504,
				payment_hash: PaymentHash([0; 32]),
				state: InboundHTLCState::Committed,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0404040404040404040404040404040404040404040404040404040404040404").unwrap()).to_byte_array();
			out
		});
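		// These five HTLCs (received 1M/2M/4M msat at expiries 500/501/504, offered 2M/3M msat
		// at 502/503) are the HTLC set from the BOLT 3 appendix C vectors; each payment hash is
		// the SHA256 of the corresponding repeated-byte preimage 0x00..0x04.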
		// commitment tx with all five HTLCs untrimmed (minimum feerate)
		chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
		chan.context.feerate_per_kw = 0;

		test_commitment!("3044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e5",
			"304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea",
			"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e0a06a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea01473044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

			{ 0,
			"3045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b",
			"30440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce",
			"02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b00000000000000000001e8030000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b014730440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },

			{ 1,
			"30440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f896004",
			"3045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f",
			"02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b01000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f89600401483045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },

			{ 2,
			"30440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d43352",
			"3045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa",
			"02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b02000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d4335201483045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },

			{ 3,
			"304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c1363",
			"304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac7487",
			"02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b03000000000000000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c13630147304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac748701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

			{ 4,
			"3044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b87",
			"3045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95",
			"02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b04000000000000000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b8701483045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
		} );
9282 // commitment tx with seven outputs untrimmed (maximum feerate)
9283 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9284 chan.context.feerate_per_kw = 647;
9286 test_commitment!("3045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee",
9287 "30450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb7",
9288 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e09c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb701483045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9291 "30450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f",
9292 "30440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b",
9293 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f014730440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
9296 "304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c",
9297 "30450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f",
9298 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0100000000000000000124060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c014830450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9301 "30440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c8",
9302 "3045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673",
9303 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe020000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c801483045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9306 "304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c64",
9307 "3045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee",
9308 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe030000000000000000010c0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c6401483045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9311 "30450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca",
9312 "3044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9",
9313 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe04000000000000000001da0d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca01473044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
// commitment tx with six outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 648;
9320 test_commitment!("304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e5",
9321 "3045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e9",
9322 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4844e9d6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e90147304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9325 "3045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e0",
9326 "304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec6",
9327 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10000000000000000000123060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e00148304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9330 "304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d384124",
9331 "3044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae",
9332 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10100000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d38412401473044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9335 "304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc625532989",
9336 "3045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e2260796",
9337 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf1020000000000000000010b0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc62553298901483045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e226079601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9340 "3044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be",
9341 "3045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6",
9342 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf103000000000000000001d90d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be01483045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
// anchors: commitment tx with six outputs untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 645;
chan.context.holder_dust_limit_satoshis = 1001;
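// With anchors_zero_htlc_fee_and_dependencies the second-stage HTLC
// transactions pay no fee, so trimming compares the HTLC amount directly
// against the dust limit: 1001 sat is the smallest holder dust limit that
// trims HTLC 0 (1000 sat) while keeping the remaining four HTLC outputs.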
9350 test_commitment_with_anchors!("3044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc312",
9351 "3045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d0051",
9352 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80084a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837ead007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5eb80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994abc996a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d005101473044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc31201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9355 "3045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc282",
9356 "3045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef09",
9357 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320002000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc28283483045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef0901008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000" },
9360 "3045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e",
9361 "3045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac",
9362 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320003000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e83483045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
9365 "3044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c1",
9366 "304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e",
9367 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320004000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c18347304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
9370 "3044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc615",
9371 "3045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226",
9372 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320005000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc61583483045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
// commitment tx with six outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2069;
chan.context.holder_dust_limit_satoshis = 546;
9380 test_commitment!("304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc",
9381 "3045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e33",
9382 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48477956a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e330148304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9385 "3045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f699",
9386 "3045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d",
9387 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0000000000000000000175020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f69901483045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9390 "3045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df",
9391 "3045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61",
9392 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0100000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df01483045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9395 "3045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e0",
9396 "3044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c18",
9397 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d020000000000000000015d060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e001473044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c1801008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9400 "304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df",
9401 "3045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33",
9402 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d03000000000000000001f2090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df01483045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
// commitment tx with five outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2070;
9409 test_commitment!("304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c",
9410 "3044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c1",
9411 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484da966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c10147304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9414 "304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b",
9415 "30440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e5",
9416 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff0000000000000000000174020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b014730440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e501008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9419 "3045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546",
9420 "30450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c6",
9421 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff010000000000000000015c060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546014830450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9424 "3045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504",
9425 "30440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502",
9426 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff02000000000000000001f1090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504014730440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
// commitment tx with five outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2194;
9433 test_commitment!("304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb3",
9434 "3044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d398",
9435 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48440966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d3980147304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9438 "3045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de8450",
9439 "304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e0154301",
9440 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de84500148304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e015430101008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9443 "3044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a0",
9444 "3045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b77",
9445 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd010000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a001483045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b7701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9448 "3045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b",
9449 "30450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3",
9450 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd020000000000000000019a090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b014830450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
// commitment tx with four outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2195;
9457 test_commitment!("304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce403",
9458 "3044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d1767",
9459 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484b8976a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d17670147304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce40301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9462 "3045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e",
9463 "3045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d76",
9464 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900000000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e01483045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d7601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9467 "3045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a",
9468 "3044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82",
9469 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900100000000000000000199090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a01473044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
// anchors: commitment tx with four outputs untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2185;
chan.context.holder_dust_limit_satoshis = 2001;
let cached_channel_type = chan.context.channel_type.clone();
chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
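// The pre-anchors channel type is stashed in `cached_channel_type` above so
// the interleaved non-anchors cases below can restore it after each anchors
// variant runs.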
9479 test_commitment_with_anchors!("3044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c0",
9480 "3045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd5",
9481 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80064a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994b80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ac5916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd501473044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9484 "304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb",
9485 "30440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f",
9486 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc02000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb834730440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
9489 "3045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd7271",
9490 "3045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688",
9491 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc03000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd727183483045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
// commitment tx with four outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 3702;
chan.context.holder_dust_limit_satoshis = 546;
chan.context.channel_type = cached_channel_type.clone();
9500 test_commitment!("304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee40169",
9501 "3045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf75",
9502 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4846f916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf750148304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee4016901475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9505 "304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb89",
9506 "304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f",
9507 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb890147304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9510 "3044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb5817",
9511 "304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc",
9512 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0100000000000000000176050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb58170147304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
// commitment tx with three outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 3703;
9519 test_commitment!("3045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e",
9520 "304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e1",
9521 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484eb936a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e101483045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9524 "3045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a",
9525 "3045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05",
9526 "0200000000010120060e4a29579d429f0f27c17ee5f1ee282f20d706d6f90b63d35946d8f3029a0000000000000000000175050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a01483045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
// anchors: commitment tx with three outputs untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 3687;
chan.context.holder_dust_limit_satoshis = 3001;
chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9535 test_commitment_with_anchors!("3045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c228377",
9536 "3045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d",
9537 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80054a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aa28b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d01483045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c22837701475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9540 "3044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c",
9541 "3045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de",
9542 "02000000000101542562b326c08e3a076d9cfca2be175041366591da334d8d513ff1686fd95a6002000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c83483045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
// commitment tx with three outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 4914;
chan.context.holder_dust_limit_satoshis = 546;
chan.context.channel_type = cached_channel_type.clone();
9551 test_commitment!("3045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c95244",
9552 "3045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c19",
9553 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484ae8f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c1901483045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c9524401475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9556 "3045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374",
9557 "30440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10",
9558 "02000000000101a9172908eace869cc35128c31fc2ab502f72e4dff31aab23e0244c4b04b11ab00000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374014730440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
// commitment tx with two outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 4915;
chan.context.holder_dust_limit_satoshis = 546;
9566 test_commitment!("304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a720",
9567 "30450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf5",
9568 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484fa926a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf50147304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a72001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
// anchors: commitment tx with two outputs untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 4894;
chan.context.holder_dust_limit_satoshis = 4001;
chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9576 test_commitment_with_anchors!("3045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b95",
9577 "30450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd",
9578 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ad0886a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd01483045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b9501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
// commitment tx with two outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 9651180;
chan.context.holder_dust_limit_satoshis = 546;
chan.context.channel_type = cached_channel_type.clone();
9586 test_commitment!("304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd3",
9587 "3045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de",
9588 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b800222020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80ec0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4840400483045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de0147304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
// commitment tx with one output untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 9651181;
9594 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
9595 "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
9596 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
		// anchors: commitment tx with one output untrimmed (minimum dust limit)
		chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
		chan.context.feerate_per_kw = 6216010;
		chan.context.holder_dust_limit_satoshis = 4001;
		chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();

		test_commitment_with_anchors!("30450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf",
		"30450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1",
		"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a04004830450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1014830450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
		// commitment tx with fee greater than funder amount
		chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
		chan.context.feerate_per_kw = 9651936;
		chan.context.holder_dust_limit_satoshis = 546;
		chan.context.channel_type = cached_channel_type;

		test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
		"304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
		"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});

		// commitment tx with 3 htlc outputs, 2 offered having the same amount and preimage
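		// (The two offered HTLCs round to the same satoshi amount and share a payment hash, so
		// their outputs are byte-for-byte identical and BIP 69 ordering alone cannot distinguish
		// them; per BOLT 3 the tie is broken by ascending `cltv_expiry`, which this vector
		// exercises.)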
		chan.context.value_to_self_msat = 7_000_000_000 - 2_000_000;
		chan.context.feerate_per_kw = 253;
		chan.context.pending_inbound_htlcs.clear();
		chan.context.pending_inbound_htlcs.push({
			let mut out = InboundHTLCOutput{
				htlc_id: 1,
				amount_msat: 2000000,
				cltv_expiry: 501,
				payment_hash: PaymentHash([0; 32]),
				state: InboundHTLCState::Committed,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
			out
		});
		chan.context.pending_outbound_htlcs.clear();
		chan.context.pending_outbound_htlcs.push({
			let mut out = OutboundHTLCOutput{
				htlc_id: 6,
				amount_msat: 5000001,
				cltv_expiry: 506,
				payment_hash: PaymentHash([0; 32]),
				state: OutboundHTLCState::Committed,
				source: HTLCSource::dummy(),
				skimmed_fee_msat: None,
				blinding_point: None,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
			out
		});
		chan.context.pending_outbound_htlcs.push({
			let mut out = OutboundHTLCOutput{
				htlc_id: 5,
				amount_msat: 5000000,
				cltv_expiry: 505,
				payment_hash: PaymentHash([0; 32]),
				state: OutboundHTLCState::Committed,
				source: HTLCSource::dummy(),
				skimmed_fee_msat: None,
				blinding_point: None,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
			out
		});
		test_commitment!("304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c8",
		"304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c",
		"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2d8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121b8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121bc0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484a69f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c0147304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

		{ 0,
		"3045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce5",
		"3044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b",
		"020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec000000000000000000011f070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce501473044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9672 "3045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a",
9673 "3045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d",
9674 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec01000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a01483045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868f9010000" },
9676 "30440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc",
9677 "304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb",
9678 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec02000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc0147304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" }

		chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
		test_commitment_with_anchors!("3044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c725",
		"3045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b76",
		"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aad9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b7601473044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c72501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9687 "30440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c2",
9688 "304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424",
9689 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c402000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c28347304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
9691 "304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c",
9692 "304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b",
9693 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40300000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c8347304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568f9010000" },
9695 "3045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a1",
9696 "3045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b",
9697 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40400000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a183483045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568fa010000" }

	#[test]
	fn test_per_commitment_secret_gen() {
		// Test vectors from BOLT 3 Appendix D:
		let mut seed = [0; 32];
		seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap());
		assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
			<Vec<u8>>::from_hex("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap()[..]);

		seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap());
		assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
			<Vec<u8>>::from_hex("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()[..]);

		assert_eq!(chan_utils::build_commitment_secret(&seed, 0xaaaaaaaaaaa),
			<Vec<u8>>::from_hex("56f4008fb007ca9acf0e15b054d5c9fd12ee06cea347914ddbaed70d1c13a528").unwrap()[..]);

		assert_eq!(chan_utils::build_commitment_secret(&seed, 0x555555555555),
			<Vec<u8>>::from_hex("9015daaeb06dba4ccc05b91b2f73bd54405f2be9f217fbacd3c5ac2e62327d31").unwrap()[..]);

		seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap());
		assert_eq!(chan_utils::build_commitment_secret(&seed, 1),
			<Vec<u8>>::from_hex("915c75942a26bb3a433a8ce2cb0427c29ec6c1775cfc78328b57f6ba7bfeaa9c").unwrap()[..]);
	}

	#[test]
	fn test_key_derivation() {
		// Test vectors from BOLT 3 Appendix E:
		let secp_ctx = Secp256k1::new();

		let base_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f").unwrap()[..]).unwrap();
		let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();

		let base_point = PublicKey::from_secret_key(&secp_ctx, &base_secret);
		assert_eq!(base_point.serialize()[..], <Vec<u8>>::from_hex("036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2").unwrap()[..]);

		let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
		assert_eq!(per_commitment_point.serialize()[..], <Vec<u8>>::from_hex("025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486").unwrap()[..]);

		assert_eq!(chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret),
			SecretKey::from_slice(&<Vec<u8>>::from_hex("cbced912d3b21bf196a766651e436aff192362621ce317704ea2f75d87e7be0f").unwrap()[..]).unwrap());

		assert_eq!(RevocationKey::from_basepoint(&secp_ctx, &RevocationBasepoint::from(base_point), &per_commitment_point).to_public_key().serialize()[..],
			<Vec<u8>>::from_hex("02916e326636d19c33f13e8c0c3a03dd157f332f3e99c317c141dd865eb01f8ff0").unwrap()[..]);

		assert_eq!(chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_secret, &base_secret),
			SecretKey::from_slice(&<Vec<u8>>::from_hex("d09ffff62ddb2297ab000cc85bcb4283fdeb6aa052affbc9dddcf33b61078110").unwrap()[..]).unwrap());
	}
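
	// For reference, a minimal sketch (ours; `derive_private_key_sketch` is a hypothetical
	// helper, not LDK's implementation) of the BOLT 3 tweak checked above:
	// privkey = basepoint_secret + SHA256(per_commitment_point || basepoint).
	#[allow(unused)]
	fn derive_private_key_sketch(secp_ctx: &Secp256k1<secp256k1::All>, per_commitment_point: &PublicKey, base_secret: &SecretKey) -> SecretKey {
		let basepoint = PublicKey::from_secret_key(secp_ctx, base_secret);
		// SHA256(per_commitment_point || basepoint) is the additive tweak defined by BOLT 3.
		let mut data = per_commitment_point.serialize().to_vec();
		data.extend_from_slice(&basepoint.serialize());
		let tweak = Sha256::hash(&data).to_byte_array();
		// Add the tweak to the base secret modulo the curve order.
		base_secret.clone().add_tweak(&secp256k1::Scalar::from_be_bytes(tweak).expect("SHA256 output is a valid scalar with overwhelming probability"))
			.expect("tweaked key is a valid secret key")
	}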

	#[test]
	fn test_zero_conf_channel_type_support() {
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
		let logger = test_utils::TestLogger::new();

		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
			node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

		let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
		channel_type_features.set_zero_conf_required();

		let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
		open_channel_msg.channel_type = Some(channel_type_features);
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let res = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
			node_b_node_id, &channelmanager::provided_channel_type_features(&config),
			&channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false);
		assert!(res.is_ok());
	}

	#[test]
	fn test_supports_anchors_zero_htlc_tx_fee() {
		// Tests that if both sides support and negotiate `anchors_zero_fee_htlc_tx`, it is the
		// resulting `channel_type`.
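		// (Note that `channel_type` negotiation is explicit: the funder proposes a type in
		// `open_channel` and the fundee must either use it or fail the channel, rather than
		// inferring a type from the `InitFeatures` intersection as in the legacy implicit
		// scheme.)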
		let secp_ctx = Secp256k1::new();
		let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
		let logger = test_utils::TestLogger::new();

		let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
		let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

		let mut config = UserConfig::default();
		config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;

		// It is not enough for just the initiator to signal `option_anchors_zero_fee_htlc_tx`;
		// both sides need to signal it.
		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42,
			&config, 0, 42, None
		).unwrap();
		assert!(!channel_a.context.channel_type.supports_anchors_zero_fee_htlc_tx());

		let mut expected_channel_type = ChannelTypeFeatures::empty();
		expected_channel_type.set_static_remote_key_required();
		expected_channel_type.set_anchors_zero_fee_htlc_tx_required();

		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
			None
		).unwrap();

		let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
		let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		).unwrap();

		assert_eq!(channel_a.context.channel_type, expected_channel_type);
		assert_eq!(channel_b.context.channel_type, expected_channel_type);
	}

	#[test]
	fn test_rejects_implicit_simple_anchors() {
		// Tests that if `option_anchors` is being negotiated implicitly through the intersection of
		// each side's `InitFeatures`, it is rejected.
		let secp_ctx = Secp256k1::new();
		let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
		let logger = test_utils::TestLogger::new();

		let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
		let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

		let config = UserConfig::default();

		// See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
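		// (Even feature bits mark a feature as required: bit 12 is the required bit of
		// `option_static_remote_key` and bit 20 that of the legacy `option_anchors`.)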
		let static_remote_key_required: u64 = 1 << 12;
		let simple_anchors_required: u64 = 1 << 20;
		let raw_init_features = static_remote_key_required | simple_anchors_required;
		let init_features_with_simple_anchors = InitFeatures::from_le_bytes(raw_init_features.to_le_bytes().to_vec());

		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
			None
		).unwrap();

		// Set `channel_type` to `None` to force the implicit feature negotiation.
		let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
		open_channel_msg.channel_type = None;

		// Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
		// `static_remote_key`, it will fail the channel.
		let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &init_features_with_simple_anchors,
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		);
		assert!(channel_b.is_err());
	}

	#[test]
	fn test_rejects_simple_anchors_channel_type() {
		// Tests that if `option_anchors` is being negotiated through the `channel_type` feature,
		// it is rejected.
		let secp_ctx = Secp256k1::new();
		let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
		let logger = test_utils::TestLogger::new();

		let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
		let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

		let config = UserConfig::default();

		// See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
		let static_remote_key_required: u64 = 1 << 12;
		let simple_anchors_required: u64 = 1 << 20;
		let simple_anchors_raw_features = static_remote_key_required | simple_anchors_required;
		let simple_anchors_init = InitFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
		let simple_anchors_channel_type = ChannelTypeFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
		assert!(!simple_anchors_init.requires_unknown_bits());
		assert!(!simple_anchors_channel_type.requires_unknown_bits());

		// First, we'll try to open a channel between A and B where A requests a channel type for
		// the original `option_anchors` feature (non-zero fee htlc tx). This should be rejected by
		// B as it's not supported by LDK.
		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
			None
		).unwrap();

		let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
		open_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());

		let res = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &simple_anchors_init,
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		);
		assert!(res.is_err());

		// Then, we'll try to open another channel where A requests a channel type for
		// `anchors_zero_fee_htlc_tx`. B is malicious and tries to downgrade the channel type to the
		// original `option_anchors` feature, which should be rejected by A as it's not supported by
		// LDK.
		let mut channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init,
			10000000, 100000, 42, &config, 0, 42, None
		).unwrap();

		let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));

		let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		).unwrap();

		let mut accept_channel_msg = channel_b.get_accept_channel_message();
		accept_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());

		let res = channel_a.accept_channel(
			&accept_channel_msg, &config.channel_handshake_limits, &simple_anchors_init
		);
		assert!(res.is_err());
	}

	#[test]
	fn test_waiting_for_batch() {
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let logger = test_utils::TestLogger::new();
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let best_block = BestBlock::from_network(network);
		let chain_hash = ChainHash::using_genesis_block(network);
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

		let mut config = UserConfig::default();
		// Set trust_own_funding_0conf while ensuring we don't send channel_ready for a
		// channel in a batch before all channels are ready.
		config.channel_handshake_limits.trust_own_funding_0conf = true;

		// Create a channel from node a to node b that will be part of batch funding.
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(
			&feeest,
			&&keys_provider,
			&&keys_provider,
			node_b_node_id,
			&channelmanager::provided_init_features(&config),
			10000000,
			100000,
			42,
			&config,
			0,
			42,
			None
		).unwrap();

		let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(
			&feeest,
			&&keys_provider,
			&&keys_provider,
			node_b_node_id,
			&channelmanager::provided_channel_type_features(&config),
			&channelmanager::provided_init_features(&config),
			&open_channel_msg,
			7,
			&config,
			0,
			&&logger,
			true, // Allow node b to send a 0conf channel_ready.
		).unwrap();

		let accept_channel_msg = node_b_chan.accept_inbound_channel();
		node_a_chan.accept_channel(
			&accept_channel_msg,
			&config.channel_handshake_limits,
			&channelmanager::provided_init_features(&config),
		).unwrap();

		// Fund the channel with a batch funding transaction.
		let output_script = node_a_chan.context.get_funding_redeemscript();
		let tx = Transaction {
			version: 2,
			lock_time: LockTime::ZERO,
			input: Vec::new(),
			output: vec![
				TxOut {
					value: 10000000, script_pubkey: output_script.clone(),
				},
				TxOut {
					value: 10000000, script_pubkey: Builder::new().into_script(),
				},
			]};
		let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
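		// The second output stands in for another channel funded by the same transaction: in
		// a batch, each channel claims its own output index of one shared funding transaction
		// (this channel uses vout 0).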
		let funding_created_msg = node_a_chan.get_funding_created(
			tx.clone(), funding_outpoint, true, &&logger,
		).map_err(|_| ()).unwrap();
		let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(
			&funding_created_msg.unwrap(),
			best_block,
			&&keys_provider,
			&&logger,
		).map_err(|_| ()).unwrap();
		let node_b_updates = node_b_chan.monitor_updating_restored(
			&&logger,
			&&keys_provider,
			chain_hash,
			&config,
			0,
		);

		// Receive funding_signed, but the channel will be configured to hold sending channel_ready and
		// broadcasting the funding transaction until the batch is ready.
		let res = node_a_chan.funding_signed(
			&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger,
		);
		let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
		let node_a_updates = node_a_chan.monitor_updating_restored(
			&&logger,
			&&keys_provider,
			chain_hash,
			&config,
			0,
		);
		// Our channel_ready shouldn't be sent yet, even with trust_own_funding_0conf set,
		// as the funding transaction depends on all channels in the batch becoming ready.
		assert!(node_a_updates.channel_ready.is_none());
		assert!(node_a_updates.funding_broadcastable.is_none());
		assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));

		// It is possible to receive a 0conf channel_ready from the remote node.
		node_a_chan.channel_ready(
			&node_b_updates.channel_ready.unwrap(),
			&&keys_provider,
			chain_hash,
			&config,
			&best_block,
			&&logger,
		).unwrap();
		assert_eq!(
			node_a_chan.context.channel_state,
			ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH | AwaitingChannelReadyFlags::THEIR_CHANNEL_READY)
		);

		// The WAITING_FOR_BATCH flag is cleared only when the ChannelManager calls set_batch_ready.
		node_a_chan.set_batch_ready();
		assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY));
		assert!(node_a_chan.check_get_channel_ready(0).is_some());
	}