// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.
use bitcoin::blockdata::constants::ChainHash;
use bitcoin::blockdata::script::{Script, ScriptBuf, Builder};
use bitcoin::blockdata::transaction::Transaction;
use bitcoin::sighash::EcdsaSighashType;
use bitcoin::consensus::encode;

use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::sha256d::Hash as Sha256d;
use bitcoin::hash_types::{Txid, BlockHash};

use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
use bitcoin::secp256k1::{PublicKey,SecretKey};
use bitcoin::secp256k1::{Secp256k1,ecdsa::Signature};
use bitcoin::secp256k1;
use crate::ln::{ChannelId, PaymentPreimage, PaymentHash};
use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
use crate::ln::msgs;
use crate::ln::msgs::DecodeError;
use crate::ln::script::{self, ShutdownScript};
use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT, ChannelShutdownState};
use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
use crate::ln::chan_utils;
use crate::ln::onion_utils::HTLCFailReason;
use crate::chain::BestBlock;
use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
use crate::chain::transaction::{OutPoint, TransactionData};
use crate::sign::ecdsa::{EcdsaChannelSigner, WriteableEcdsaChannelSigner};
use crate::sign::{EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
use crate::events::ClosureReason;
use crate::routing::gossip::NodeId;
use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
use crate::util::logger::{Logger, Record, WithContext};
use crate::util::errors::APIError;
use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure};
use crate::util::scid_utils::scid_from_parts;

use crate::io;
use crate::prelude::*;
use core::{cmp,mem,fmt};
use core::convert::TryInto;
use core::ops::Deref;

#[cfg(any(test, fuzzing, debug_assertions))]
use crate::sync::Mutex;
use crate::sign::type_resolver::ChannelSignerType;

use super::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint, RevocationBasepoint};
#[cfg(test)]
pub struct ChannelValueStat {
	pub value_to_self_msat: u64,
	pub channel_value_msat: u64,
	pub channel_reserve_msat: u64,
	pub pending_outbound_htlcs_amount_msat: u64,
	pub pending_inbound_htlcs_amount_msat: u64,
	pub holding_cell_outbound_amount_msat: u64,
	pub counterparty_max_htlc_value_in_flight_msat: u64, // outgoing
	pub counterparty_dust_limit_msat: u64,
}
pub struct AvailableBalances {
	/// The amount that would go to us if we close the channel, ignoring any on-chain fees.
	pub balance_msat: u64,
	/// Total amount available for our counterparty to send to us.
	pub inbound_capacity_msat: u64,
	/// Total amount available for us to send to our counterparty.
	pub outbound_capacity_msat: u64,
	/// The maximum value we can assign to the next outbound HTLC.
	pub next_outbound_htlc_limit_msat: u64,
	/// The minimum value we can assign to the next outbound HTLC.
	pub next_outbound_htlc_minimum_msat: u64,
}
#[derive(Debug, Clone, Copy, PartialEq)]
enum FeeUpdateState {
	// Inbound states mirroring InboundHTLCState
	RemoteAnnounced,
	AwaitingRemoteRevokeToAnnounce,
	// Note that we do not have an `AwaitingAnnouncedRemoteRevoke` variant here as it is universally
	// handled the same as `Committed`, with the only exception in `InboundHTLCState` being the
	// distinction of when we allow ourselves to forward the HTLC. Because we aren't "forwarding"
	// the fee update anywhere, we can simply consider the fee update `Committed` immediately
	// instead of setting it to `AwaitingAnnouncedRemoteRevoke`.

	// Outbound state can only be `LocalAnnounced` or `Committed`
	Outbound,
	Committed,
}
enum InboundHTLCRemovalReason {
	FailRelay(msgs::OnionErrorPacket),
	FailMalformed(([u8; 32], u16)),
	Fulfill(PaymentPreimage),
}
enum InboundHTLCState {
	/// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an
	/// update_add_htlc message for this HTLC.
	RemoteAnnounced(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've
	/// revoke_and_ack'd it), but the remote hasn't yet revoked their previous
	/// state (see the example below). We have not yet included this HTLC in a
	/// commitment_signed message because we are waiting on the remote's
	/// aforementioned state revocation. One reason this missing remote RAA
	/// (revoke_and_ack) blocks us from constructing a commitment_signed message
	/// is because every time we create a new "state", i.e. every time we sign a
	/// new commitment tx (see [BOLT #2]), we need a new per_commitment_point,
	/// which are provided one-at-a-time in each RAA. E.g., the last RAA they
	/// sent provided the per_commitment_point for our current commitment tx.
	/// The other reason we should not send a commitment_signed without their RAA
	/// is because their RAA serves to ACK our previous commitment_signed.
	///
	/// Here's an example of how an HTLC could come to be in this state:
	/// remote --> update_add_htlc(prev_htlc) --> local
	/// remote --> commitment_signed(prev_htlc) --> local
	/// remote <-- revoke_and_ack <-- local
	/// remote <-- commitment_signed(prev_htlc) <-- local
	/// [note that here, the remote does not respond with a RAA]
	/// remote --> update_add_htlc(this_htlc) --> local
	/// remote --> commitment_signed(prev_htlc, this_htlc) --> local
	/// Now `this_htlc` will be assigned this state. It's unable to be officially
	/// accepted, i.e. included in a commitment_signed, because we're missing the
	/// RAA that provides our next per_commitment_point. The per_commitment_point
	/// is used to derive commitment keys, which are used to construct the
	/// signatures in a commitment_signed message.
	/// Implies AwaitingRemoteRevoke.
	///
	/// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
	AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've revoke_and_ack'd it).
	/// We have also included this HTLC in our latest commitment_signed and are now just waiting
	/// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the
	/// channel (before it can then get forwarded and/or removed).
	/// Implies AwaitingRemoteRevoke.
	AwaitingAnnouncedRemoteRevoke(PendingHTLCStatus),
	Committed,
	/// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we'll drop it.
	///
	/// Note that we have to keep an eye on the HTLC until we've received a broadcastable
	/// commitment transaction without it as otherwise we'll have to force-close the channel to
	/// claim it before the timeout (obviously doesn't apply to revoked HTLCs that we can't claim
	/// anyway). That said, ChannelMonitor does this for us (see
	/// ChannelMonitor::should_broadcast_holder_commitment_txn) so we actually remove the HTLC from
	/// our own local state before then, once we're sure that the next commitment_signed and
	/// ChannelMonitor::provide_latest_local_commitment_tx will not include this HTLC.
	LocalRemoved(InboundHTLCRemovalReason),
}
struct InboundHTLCOutput {
	htlc_id: u64,
	amount_msat: u64,
	cltv_expiry: u32,
	payment_hash: PaymentHash,
	state: InboundHTLCState,
}
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
enum OutboundHTLCState {
	/// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we will promote to Committed (note that they may not accept it until the next time we
	/// revoke, but we don't really care about that:
	///  * they've revoked, so worst case we can announce an old state and get our (option on)
	///    money back (though we won't), and,
	///  * we'll send them a revoke when they send a commitment_signed, and since only they're
	///    allowed to remove it, the "can only be removed once committed on both sides" requirement
	///    doesn't matter to us and it's up to them to enforce it, worst-case they jump ahead but
	///    we'll never get out of sync).
	/// Note that we Box the OnionPacket as it's rather large and we don't want to blow up
	/// OutboundHTLCOutput's size just for a temporary bit
	LocalAnnounced(Box<msgs::OnionPacket>),
	Committed,
	/// Remote removed this (outbound) HTLC. We're waiting on their commitment_signed to finalize
	/// the change (though they'll need to revoke before we fail the payment).
	RemoteRemoved(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We also have not yet removed this HTLC in a commitment_signed message, and are waiting on a
	/// remote revoke_and_ack on a previous state before we can do so.
	AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We have removed this HTLC in our latest commitment_signed and are now just waiting on a
	/// revoke_and_ack to drop completely.
	AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome),
}
#[derive(Clone)]
#[cfg_attr(test, derive(Debug, PartialEq))]
enum OutboundHTLCOutcome {
	/// LDK version 0.0.105+ will always fill in the preimage here.
	Success(Option<PaymentPreimage>),
	Failure(HTLCFailReason),
}
impl From<Option<HTLCFailReason>> for OutboundHTLCOutcome {
	fn from(o: Option<HTLCFailReason>) -> Self {
		match o {
			None => OutboundHTLCOutcome::Success(None),
			Some(r) => OutboundHTLCOutcome::Failure(r)
		}
	}
}
impl<'a> Into<Option<&'a HTLCFailReason>> for &'a OutboundHTLCOutcome {
	fn into(self) -> Option<&'a HTLCFailReason> {
		match self {
			OutboundHTLCOutcome::Success(_) => None,
			OutboundHTLCOutcome::Failure(ref r) => Some(r)
		}
	}
}
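
// Illustrative only: the conversions above encode "no failure reason" as a
// successful outcome (with the preimage filled in later, per the note on
// `OutboundHTLCOutcome::Success`). This check is not from the original file.
#[cfg(test)]
#[test]
fn outcome_from_fail_reason_sketch() {
	let outcome: OutboundHTLCOutcome = Option::<HTLCFailReason>::None.into();
	assert!(matches!(outcome, OutboundHTLCOutcome::Success(None)));
}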
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
struct OutboundHTLCOutput {
	htlc_id: u64,
	amount_msat: u64,
	cltv_expiry: u32,
	payment_hash: PaymentHash,
	state: OutboundHTLCState,
	source: HTLCSource,
	blinding_point: Option<PublicKey>,
	skimmed_fee_msat: Option<u64>,
}
/// See AwaitingRemoteRevoke ChannelState for more info
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
enum HTLCUpdateAwaitingACK {
	AddHTLC { // TODO: Time out if we're getting close to cltv_expiry
		// always outbound
		amount_msat: u64,
		cltv_expiry: u32,
		payment_hash: PaymentHash,
		source: HTLCSource,
		onion_routing_packet: msgs::OnionPacket,
		// The extra fee we're skimming off the top of this HTLC.
		skimmed_fee_msat: Option<u64>,
		blinding_point: Option<PublicKey>,
	},
	ClaimHTLC {
		payment_preimage: PaymentPreimage,
		htlc_id: u64,
	},
	FailHTLC {
		htlc_id: u64,
		err_packet: msgs::OnionErrorPacket,
	},
	FailMalformedHTLC {
		htlc_id: u64,
		failure_code: u16,
		sha256_of_onion: [u8; 32],
	},
}
macro_rules! define_state_flags {
	($flag_type_doc: expr, $flag_type: ident, [$(($flag_doc: expr, $flag: ident, $value: expr)),+], $extra_flags: expr) => {
		#[doc = $flag_type_doc]
		#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
		struct $flag_type(u32);

		impl $flag_type {
			$(
				#[doc = $flag_doc]
				const $flag: $flag_type = $flag_type($value);
			)*

			/// All flags that apply to the specified [`ChannelState`] variant.
			#[allow(unused)]
			const ALL: $flag_type = Self($(Self::$flag.0 | )* $extra_flags);

			#[allow(unused)]
			fn new() -> Self { Self(0) }

			#[allow(unused)]
			fn from_u32(flags: u32) -> Result<Self, ()> {
				if flags & !Self::ALL.0 != 0 {
					Err(())
				} else {
					Ok($flag_type(flags))
				}
			}

			#[allow(unused)]
			fn is_empty(&self) -> bool { self.0 == 0 }

			#[allow(unused)]
			fn is_set(&self, flag: Self) -> bool { *self & flag == flag }
		}

		impl core::ops::Not for $flag_type {
			type Output = Self;
			fn not(self) -> Self::Output { Self(!self.0) }
		}
		impl core::ops::BitOr for $flag_type {
			type Output = Self;
			fn bitor(self, rhs: Self) -> Self::Output { Self(self.0 | rhs.0) }
		}
		impl core::ops::BitOrAssign for $flag_type {
			fn bitor_assign(&mut self, rhs: Self) { self.0 |= rhs.0; }
		}
		impl core::ops::BitAnd for $flag_type {
			type Output = Self;
			fn bitand(self, rhs: Self) -> Self::Output { Self(self.0 & rhs.0) }
		}
		impl core::ops::BitAndAssign for $flag_type {
			fn bitand_assign(&mut self, rhs: Self) { self.0 &= rhs.0; }
		}
	};
	($flag_type_doc: expr, $flag_type: ident, $flags: tt) => {
		define_state_flags!($flag_type_doc, $flag_type, $flags, 0);
	};
	($flag_type_doc: expr, FUNDED_STATE, $flag_type: ident, $flags: tt) => {
		define_state_flags!($flag_type_doc, $flag_type, $flags, FundedStateFlags::ALL.0);
		impl core::ops::BitOr<FundedStateFlags> for $flag_type {
			type Output = Self;
			fn bitor(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 | rhs.0) }
		}
		impl core::ops::BitOrAssign<FundedStateFlags> for $flag_type {
			fn bitor_assign(&mut self, rhs: FundedStateFlags) { self.0 |= rhs.0; }
		}
		impl core::ops::BitAnd<FundedStateFlags> for $flag_type {
			type Output = Self;
			fn bitand(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 & rhs.0) }
		}
		impl core::ops::BitAndAssign<FundedStateFlags> for $flag_type {
			fn bitand_assign(&mut self, rhs: FundedStateFlags) { self.0 &= rhs.0; }
		}
		impl PartialEq<FundedStateFlags> for $flag_type {
			fn eq(&self, other: &FundedStateFlags) -> bool { self.0 == other.0 }
		}
		impl From<FundedStateFlags> for $flag_type {
			fn from(flags: FundedStateFlags) -> Self { Self(flags.0) }
		}
	};
}
/// We declare all the states/flags here together to help determine which bits are still available
/// to choose.
mod state_flags {
	pub const OUR_INIT_SENT: u32 = 1 << 0;
	pub const THEIR_INIT_SENT: u32 = 1 << 1;
	pub const FUNDING_NEGOTIATED: u32 = 1 << 2;
	pub const AWAITING_CHANNEL_READY: u32 = 1 << 3;
	pub const THEIR_CHANNEL_READY: u32 = 1 << 4;
	pub const OUR_CHANNEL_READY: u32 = 1 << 5;
	pub const CHANNEL_READY: u32 = 1 << 6;
	pub const PEER_DISCONNECTED: u32 = 1 << 7;
	pub const MONITOR_UPDATE_IN_PROGRESS: u32 = 1 << 8;
	pub const AWAITING_REMOTE_REVOKE: u32 = 1 << 9;
	pub const REMOTE_SHUTDOWN_SENT: u32 = 1 << 10;
	pub const LOCAL_SHUTDOWN_SENT: u32 = 1 << 11;
	pub const SHUTDOWN_COMPLETE: u32 = 1 << 12;
	pub const WAITING_FOR_BATCH: u32 = 1 << 13;
}
371 "Flags that apply to all [`ChannelState`] variants in which the channel is funded.",
373 ("Indicates the remote side is considered \"disconnected\" and no updates are allowed \
374 until after we've done a `channel_reestablish` dance.", PEER_DISCONNECTED, state_flags::PEER_DISCONNECTED),
375 ("Indicates the user has told us a `ChannelMonitor` update is pending async persistence \
376 somewhere and we should pause sending any outbound messages until they've managed to \
377 complete it.", MONITOR_UPDATE_IN_PROGRESS, state_flags::MONITOR_UPDATE_IN_PROGRESS),
378 ("Indicates we received a `shutdown` message from the remote end. If set, they may not add \
379 any new HTLCs to the channel, and we are expected to respond with our own `shutdown` \
380 message when possible.", REMOTE_SHUTDOWN_SENT, state_flags::REMOTE_SHUTDOWN_SENT),
381 ("Indicates we sent a `shutdown` message. At this point, we may not add any new HTLCs to \
382 the channel.", LOCAL_SHUTDOWN_SENT, state_flags::LOCAL_SHUTDOWN_SENT)
387 "Flags that only apply to [`ChannelState::NegotiatingFunding`].",
388 NegotiatingFundingFlags, [
389 ("Indicates we have (or are prepared to) send our `open_channel`/`accept_channel` message.",
390 OUR_INIT_SENT, state_flags::OUR_INIT_SENT),
391 ("Indicates we have received their `open_channel`/`accept_channel` message.",
392 THEIR_INIT_SENT, state_flags::THEIR_INIT_SENT)
397 "Flags that only apply to [`ChannelState::AwaitingChannelReady`].",
398 FUNDED_STATE, AwaitingChannelReadyFlags, [
399 ("Indicates they sent us a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
400 `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
401 THEIR_CHANNEL_READY, state_flags::THEIR_CHANNEL_READY),
402 ("Indicates we sent them a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
403 `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
404 OUR_CHANNEL_READY, state_flags::OUR_CHANNEL_READY),
405 ("Indicates the channel was funded in a batch and the broadcast of the funding transaction \
406 is being held until all channels in the batch have received `funding_signed` and have \
407 their monitors persisted.", WAITING_FOR_BATCH, state_flags::WAITING_FOR_BATCH)
412 "Flags that only apply to [`ChannelState::ChannelReady`].",
413 FUNDED_STATE, ChannelReadyFlags, [
414 ("Indicates that we have sent a `commitment_signed` but are awaiting the responding \
415 `revoke_and_ack` message. During this period, we can't generate new `commitment_signed` \
416 messages as we'd be unable to determine which HTLCs they included in their `revoke_and_ack` \
417 implicit ACK, so instead we have to hold them away temporarily to be sent later.",
418 AWAITING_REMOTE_REVOKE, state_flags::AWAITING_REMOTE_REVOKE)
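
// Illustrative sketch, not from the original file: the bit-flag types
// generated by `define_state_flags!` combine and test like plain bitmasks,
// and funded variants reserve the shared `FundedStateFlags` bits in `ALL`.
#[cfg(test)]
#[test]
fn state_flags_combine_sketch() {
	let flags = AwaitingChannelReadyFlags::THEIR_CHANNEL_READY | AwaitingChannelReadyFlags::OUR_CHANNEL_READY;
	assert!(flags.is_set(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY));
	assert!(!flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
	// The shared funded-state bits are part of every funded variant's mask.
	assert!(AwaitingChannelReadyFlags::ALL.is_set(FundedStateFlags::PEER_DISCONNECTED.into()));
}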
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
enum ChannelState {
	/// We are negotiating the parameters required for the channel prior to funding it.
	NegotiatingFunding(NegotiatingFundingFlags),
	/// We have sent `funding_created` and are awaiting a `funding_signed` to advance to
	/// `AwaitingChannelReady`. Note that this is nonsense for an inbound channel as we immediately generate
	/// `funding_signed` upon receipt of `funding_created`, so simply skip this state.
	FundingNegotiated,
	/// We've received/sent `funding_created` and `funding_signed` and are thus now waiting on the
	/// funding transaction to confirm.
	AwaitingChannelReady(AwaitingChannelReadyFlags),
	/// Both we and our counterparty consider the funding transaction confirmed and the channel is
	/// now operational.
	ChannelReady(ChannelReadyFlags),
	/// We've successfully negotiated a `closing_signed` dance. At this point, the `ChannelManager`
	/// is about to drop us, but we store this anyway.
	ShutdownComplete,
}
macro_rules! impl_state_flag {
	($get: ident, $set: ident, $clear: ident, $state_flag: expr, [$($state: ident),+]) => {
		#[allow(unused)]
		fn $get(&self) -> bool {
			match self {
				$(
					ChannelState::$state(flags) => flags.is_set($state_flag.into()),
				)*
				_ => false,
			}
		}
		#[allow(unused)]
		fn $set(&mut self) {
			match self {
				$(
					ChannelState::$state(flags) => *flags |= $state_flag,
				)*
				_ => debug_assert!(false, "Attempted to set flag on unexpected ChannelState"),
			}
		}
		#[allow(unused)]
		fn $clear(&mut self) {
			match self {
				$(
					ChannelState::$state(flags) => *flags &= !($state_flag),
				)*
				_ => debug_assert!(false, "Attempted to clear flag on unexpected ChannelState"),
			}
		}
	};
	($get: ident, $set: ident, $clear: ident, $state_flag: expr, FUNDED_STATES) => {
		impl_state_flag!($get, $set, $clear, $state_flag, [AwaitingChannelReady, ChannelReady]);
	};
	($get: ident, $set: ident, $clear: ident, $state_flag: expr, $state: ident) => {
		impl_state_flag!($get, $set, $clear, $state_flag, [$state]);
	};
}
impl ChannelState {
	fn from_u32(state: u32) -> Result<Self, ()> {
		match state {
			state_flags::FUNDING_NEGOTIATED => Ok(ChannelState::FundingNegotiated),
			state_flags::SHUTDOWN_COMPLETE => Ok(ChannelState::ShutdownComplete),
			val => {
				if val & state_flags::AWAITING_CHANNEL_READY == state_flags::AWAITING_CHANNEL_READY {
					AwaitingChannelReadyFlags::from_u32(val & !state_flags::AWAITING_CHANNEL_READY)
						.map(|flags| ChannelState::AwaitingChannelReady(flags))
				} else if val & state_flags::CHANNEL_READY == state_flags::CHANNEL_READY {
					ChannelReadyFlags::from_u32(val & !state_flags::CHANNEL_READY)
						.map(|flags| ChannelState::ChannelReady(flags))
				} else if let Ok(flags) = NegotiatingFundingFlags::from_u32(val) {
					Ok(ChannelState::NegotiatingFunding(flags))
				} else {
					Err(())
				}
			},
		}
	}

	fn to_u32(&self) -> u32 {
		match self {
			ChannelState::NegotiatingFunding(flags) => flags.0,
			ChannelState::FundingNegotiated => state_flags::FUNDING_NEGOTIATED,
			ChannelState::AwaitingChannelReady(flags) => state_flags::AWAITING_CHANNEL_READY | flags.0,
			ChannelState::ChannelReady(flags) => state_flags::CHANNEL_READY | flags.0,
			ChannelState::ShutdownComplete => state_flags::SHUTDOWN_COMPLETE,
		}
	}

	fn is_pre_funded_state(&self) -> bool {
		matches!(self, ChannelState::NegotiatingFunding(_)|ChannelState::FundingNegotiated)
	}

	fn is_both_sides_shutdown(&self) -> bool {
		self.is_local_shutdown_sent() && self.is_remote_shutdown_sent()
	}

	fn with_funded_state_flags_mask(&self) -> FundedStateFlags {
		match self {
			ChannelState::AwaitingChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
			ChannelState::ChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
			_ => FundedStateFlags::new(),
		}
	}

	fn should_force_holding_cell(&self) -> bool {
		match self {
			ChannelState::ChannelReady(flags) =>
				flags.is_set(ChannelReadyFlags::AWAITING_REMOTE_REVOKE) ||
					flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into()) ||
					flags.is_set(FundedStateFlags::PEER_DISCONNECTED.into()),
			_ => {
				debug_assert!(false, "The holding cell is only valid within ChannelReady");
				false
			},
		}
	}
	impl_state_flag!(is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected,
		FundedStateFlags::PEER_DISCONNECTED, FUNDED_STATES);
	impl_state_flag!(is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress,
		FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS, FUNDED_STATES);
	impl_state_flag!(is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent,
		FundedStateFlags::LOCAL_SHUTDOWN_SENT, FUNDED_STATES);
	impl_state_flag!(is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent,
		FundedStateFlags::REMOTE_SHUTDOWN_SENT, FUNDED_STATES);
	impl_state_flag!(is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready,
		AwaitingChannelReadyFlags::OUR_CHANNEL_READY, AwaitingChannelReady);
	impl_state_flag!(is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready,
		AwaitingChannelReadyFlags::THEIR_CHANNEL_READY, AwaitingChannelReady);
	impl_state_flag!(is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch,
		AwaitingChannelReadyFlags::WAITING_FOR_BATCH, AwaitingChannelReady);
	impl_state_flag!(is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke,
		ChannelReadyFlags::AWAITING_REMOTE_REVOKE, ChannelReady);
}
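
// Illustrative round-trip, assuming the private `to_u32`/`from_u32` encoding
// above: a state survives conversion through its serialized u32 form. This
// check is a sketch, not part of the original file.
#[cfg(test)]
#[test]
fn channel_state_u32_round_trip_sketch() {
	let state = ChannelState::ChannelReady(ChannelReadyFlags::AWAITING_REMOTE_REVOKE);
	assert_eq!(ChannelState::from_u32(state.to_u32()), Ok(state));
}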
pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;

pub const DEFAULT_MAX_HTLCS: u16 = 50;
pub(crate) fn commitment_tx_base_weight(channel_type_features: &ChannelTypeFeatures) -> u64 {
	const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
	const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124;
	if channel_type_features.supports_anchors_zero_fee_htlc_tx() { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT }
}
#[cfg(not(test))]
const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
#[cfg(test)]
pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;

pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;
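
// Illustrative arithmetic, not from the original file: the expected weight of
// a commitment transaction is the channel-type-dependent base weight plus a
// fixed weight per non-dust HTLC output, using the constants defined above.
#[cfg(test)]
#[test]
fn commitment_weight_arithmetic_sketch() {
	let non_anchor = ChannelTypeFeatures::only_static_remote_key();
	// Two non-dust HTLCs on a non-anchor commitment transaction.
	let weight = commitment_tx_base_weight(&non_anchor) + 2 * COMMITMENT_TX_WEIGHT_PER_HTLC;
	assert_eq!(weight, 724 + 2 * 172);
}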
/// The percentage of the channel value `holder_max_htlc_value_in_flight_msat` used to be set to,
/// before this was made configurable. The percentage was made configurable in LDK 0.0.107,
/// although LDK 0.0.104+ enabled serialization of channels with a different value set for
/// `holder_max_htlc_value_in_flight_msat`.
pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10;
/// Maximum `funding_satoshis` value according to the BOLT #2 specification, if
/// `option_support_large_channel` (aka wumbo channels) is not supported.
/// It's 2^24 - 1.
pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;

/// Total bitcoin supply in satoshis.
pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 1_0000_0000;
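
// Illustrative sanity check on the constants above (not from the original
// file): the non-wumbo funding cap is 2^24 - 1 sats (~0.168 BTC), far below
// the total supply.
#[cfg(test)]
#[test]
fn funding_cap_sketch() {
	assert_eq!(MAX_FUNDING_SATOSHIS_NO_WUMBO, 16_777_215);
	assert!(MAX_FUNDING_SATOSHIS_NO_WUMBO < TOTAL_BITCOIN_SUPPLY_SATOSHIS);
}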
/// The maximum network dust limit for standard script formats. This currently represents the
/// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire
/// transaction non-standard and thus refuses to relay it.
/// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many
/// implementations use this value for their dust limit today.
pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546;

/// The maximum channel dust limit we will accept from our counterparty.
pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS;
/// The dust limit is used for both the commitment transaction outputs as well as the closing
/// transactions. For cooperative closing transactions, we require segwit outputs, though accept
/// *any* segwit scripts, which are allowed to be up to 42 bytes in length.
/// In order to avoid having to concern ourselves with standardness during the closing process, we
/// simply require our counterparty to use a dust limit which will leave any segwit output
/// standard.
/// See <https://github.com/lightning/bolts/issues/905> for more details.
pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;
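
// Illustrative relationship between the dust-limit bounds above (not from the
// original file): any counterparty `dust_limit_satoshis` we accept must fall
// within [MIN_CHAN_DUST_LIMIT_SATOSHIS, MAX_CHAN_DUST_LIMIT_SATOSHIS].
#[cfg(test)]
#[test]
fn dust_limit_bounds_sketch() {
	assert!(MIN_CHAN_DUST_LIMIT_SATOSHIS <= MAX_CHAN_DUST_LIMIT_SATOSHIS);
}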
// Just a reasonable implementation-specific safe lower bound, higher than the dust limit.
pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;
/// Used to return a simple Error back to ChannelManager. Will get converted to a
/// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
/// channel_id in ChannelManager.
pub(super) enum ChannelError {
	Ignore(String),
	Warn(String),
	Close(String),
}
impl fmt::Debug for ChannelError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match self {
			&ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
			&ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
			&ChannelError::Close(ref e) => write!(f, "Close : {}", e),
		}
	}
}
impl fmt::Display for ChannelError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match self {
			&ChannelError::Ignore(ref e) => write!(f, "{}", e),
			&ChannelError::Warn(ref e) => write!(f, "{}", e),
			&ChannelError::Close(ref e) => write!(f, "{}", e),
		}
	}
}
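
// Illustrative only (not from the original file): per the impls above,
// `Debug` prefixes the variant name while `Display` yields just the message.
#[cfg(test)]
#[test]
fn channel_error_formatting_sketch() {
	let err = ChannelError::Warn("oops".to_owned());
	assert_eq!(format!("{:?}", err), "Warn : oops");
	assert_eq!(format!("{}", err), "oops");
}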
pub(super) struct WithChannelContext<'a, L: Deref> where L::Target: Logger {
	pub logger: &'a L,
	pub peer_id: Option<PublicKey>,
	pub channel_id: Option<ChannelId>,
}
impl<'a, L: Deref> Logger for WithChannelContext<'a, L> where L::Target: Logger {
	fn log(&self, mut record: Record) {
		record.peer_id = self.peer_id;
		record.channel_id = self.channel_id;
		self.logger.log(record)
	}
}
impl<'a, 'b, L: Deref> WithChannelContext<'a, L>
where L::Target: Logger {
	pub(super) fn from<S: Deref>(logger: &'a L, context: &'b ChannelContext<S>) -> Self
	where S::Target: SignerProvider
	{
		WithChannelContext {
			logger,
			peer_id: Some(context.counterparty_node_id),
			channel_id: Some(context.channel_id),
		}
	}
}
macro_rules! secp_check {
	($res: expr, $err: expr) => {
		match $res {
			Ok(thing) => thing,
			Err(_) => return Err(ChannelError::Close($err)),
		}
	};
}
675 /// The "channel disabled" bit in channel_update must be set based on whether we are connected to
676 /// our counterparty or not. However, we don't want to announce updates right away to avoid
677 /// spamming the network with updates if the connection is flapping. Instead, we "stage" updates to
678 /// our channel_update message and track the current state here.
679 /// See implementation at [`super::channelmanager::ChannelManager::timer_tick_occurred`].
680 #[derive(Clone, Copy, PartialEq)]
681 pub(super) enum ChannelUpdateStatus {
682 /// We've announced the channel as enabled and are connected to our peer.
684 /// Our channel is no longer live, but we haven't announced the channel as disabled yet.
686 /// Our channel is live again, but we haven't announced the channel as enabled yet.
688 /// We've announced the channel as disabled.
/// We track when we sent an `AnnouncementSignatures` to our peer in a few states, described here.
pub enum AnnouncementSigsState {
	/// We have not sent our peer an `AnnouncementSignatures` yet, or our peer disconnected since
	/// we sent the last `AnnouncementSignatures`.
	NotSent,
	/// We sent an `AnnouncementSignatures` to our peer since the last time our peer disconnected.
	/// This state never appears on disk - instead we write `NotSent`.
	MessageSent,
	/// We sent a `CommitmentSigned` after the last `AnnouncementSignatures` we sent. Because we
	/// only ever have a single `CommitmentSigned` pending at once, if we sent one after sending
	/// `AnnouncementSignatures` then we know the peer received our `AnnouncementSignatures` if
	/// they send back a `RevokeAndACK`.
	/// This state never appears on disk - instead we write `NotSent`.
	Committed,
	/// We received a `RevokeAndACK`, effectively ack-ing our `AnnouncementSignatures`, at this
	/// point we no longer need to re-send our `AnnouncementSignatures` again on reconnect.
	PeerReceived,
}
/// An enum indicating whether the local or remote side offered a given HTLC.
enum HTLCInitiator {
	LocalOffered,
	RemoteOffered,
}
/// A struct gathering stats on pending HTLCs, either inbound or outbound side.
struct HTLCStats {
	pending_htlcs: u32,
	pending_htlcs_value_msat: u64,
	on_counterparty_tx_dust_exposure_msat: u64,
	on_holder_tx_dust_exposure_msat: u64,
	holding_cell_msat: u64,
	on_holder_tx_holding_cell_htlcs_count: u32, // dust HTLCs *non*-included
}
/// A struct gathering stats on a commitment transaction, either local or remote.
struct CommitmentStats<'a> {
	tx: CommitmentTransaction, // the transaction info
	feerate_per_kw: u32, // the feerate included to build the transaction
	total_fee_sat: u64, // the total fee included in the transaction
	num_nondust_htlcs: usize, // the number of HTLC outputs (dust HTLCs *non*-included)
	htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction
	local_balance_msat: u64, // local balance before fees *not* considering dust limits
	remote_balance_msat: u64, // remote balance before fees *not* considering dust limits
	outbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
	inbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful received HTLCs since last commitment
}
/// Used when calculating whether we or the remote can afford an additional HTLC.
struct HTLCCandidate {
	amount_msat: u64,
	origin: HTLCInitiator,
}

impl HTLCCandidate {
	fn new(amount_msat: u64, origin: HTLCInitiator) -> Self {
		Self {
			amount_msat,
			origin,
		}
	}
}
/// A return value enum for get_update_fulfill_htlc. See UpdateFulfillCommitFetch variants for
/// description
enum UpdateFulfillFetch {
	NewClaim {
		monitor_update: ChannelMonitorUpdate,
		htlc_value_msat: u64,
		msg: Option<msgs::UpdateFulfillHTLC>,
	},
	DuplicateClaim {},
}
/// The return type of get_update_fulfill_htlc_and_commit.
pub enum UpdateFulfillCommitFetch {
	/// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
	/// it in the holding cell, or re-generated the update_fulfill message after the same claim was
	/// previously placed in the holding cell (and has since been removed).
	NewClaim {
		/// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
		monitor_update: ChannelMonitorUpdate,
		/// The value of the HTLC which was claimed, in msat.
		htlc_value_msat: u64,
	},
	/// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
	/// or has been forgotten (presumably previously claimed).
	DuplicateClaim {},
}
/// The return value of `monitor_updating_restored`
pub(super) struct MonitorRestoreUpdates {
	pub raa: Option<msgs::RevokeAndACK>,
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub order: RAACommitmentOrder,
	pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
	pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
	pub finalized_claimed_htlcs: Vec<HTLCSource>,
	pub funding_broadcastable: Option<Transaction>,
	pub channel_ready: Option<msgs::ChannelReady>,
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
}
/// The return value of `signer_maybe_unblocked`
pub(super) struct SignerResumeUpdates {
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub funding_signed: Option<msgs::FundingSigned>,
	pub channel_ready: Option<msgs::ChannelReady>,
}
/// The return value of `channel_reestablish`
pub(super) struct ReestablishResponses {
	pub channel_ready: Option<msgs::ChannelReady>,
	pub raa: Option<msgs::RevokeAndACK>,
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub order: RAACommitmentOrder,
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
	pub shutdown_msg: Option<msgs::Shutdown>,
}
/// The result of a shutdown that should be handled.
pub(crate) struct ShutdownResult {
	/// A channel monitor update to apply.
	pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
	/// A list of dropped outbound HTLCs that can safely be failed backwards immediately.
	pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
	/// An unbroadcasted batch funding transaction id. The closure of this channel should be
	/// propagated to the remainder of the batch.
	pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
	pub(crate) channel_id: ChannelId,
	pub(crate) counterparty_node_id: PublicKey,
}
/// If the majority of the channel's funds are to the fundee and the initiator holds only just
/// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
/// initiator controls the feerate, if they then go to increase the channel fee, they may have no
/// balance but the fundee is unable to send a payment as the increase in fee more than drains
/// their reserve value. Thus, neither side can send a new HTLC and the channel becomes useless.
/// Thus, before sending an HTLC when we are the initiator, we check that the feerate can increase
/// by this multiple without hitting this case, before sending.
/// This multiple is effectively the maximum feerate "jump" we expect until more HTLCs flow over
/// the channel. Sadly, there isn't really a good number for this - if we expect to have no new
/// HTLCs for days we may need this to suffice for feerate increases across days, but that may
/// leave the channel less usable as we hold a bigger reserve.
#[cfg(any(fuzzing, test))]
pub const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
#[cfg(not(any(fuzzing, test)))]
const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
/// If we fail to see a funding transaction confirmed on-chain within this many blocks after the
/// channel creation on an inbound channel, we simply force-close and move on.
/// This constant is the one suggested in BOLT 2.
pub(crate) const FUNDING_CONF_DEADLINE_BLOCKS: u32 = 2016;
/// In case of a concurrent update_add_htlc proposed by our counterparty, we might
/// not have enough balance value remaining to cover the onchain cost of this new
/// HTLC weight. If this happens, our counterparty fails the reception of our
/// commitment_signed including this new HTLC due to infringement on the channel
/// reserve.
/// To prevent this case, we compute our outbound update_fee with an HTLC buffer of
/// size 2. However, if the number of concurrent update_add_htlc is higher, this still
/// leads to a channel force-close. Ultimately, this is an issue coming from the
/// design of LN state machines, allowing asynchronous updates.
pub(crate) const CONCURRENT_INBOUND_HTLC_FEE_BUFFER: u32 = 2;
/// When a channel is opened, we check that the funding amount is enough to pay for relevant
/// commitment transaction fees, with at least this many HTLCs present on the commitment
/// transaction (not counting the value of the HTLCs themselves).
pub(crate) const MIN_AFFORDABLE_HTLC_COUNT: usize = 4;
/// When a [`Channel`] has its [`ChannelConfig`] updated, its existing one is stashed for up to this
/// number of ticks to allow forwarding HTLCs by nodes that have yet to receive the new
/// ChannelUpdate prompted by the config update. This value was determined as follows:
///
///   * The expected interval between ticks (1 minute).
///   * The average convergence delay of updates across the network, i.e., ~300 seconds on average
///     for a node to see an update as seen on `<https://arxiv.org/pdf/2205.12737.pdf>`.
///   * `EXPIRE_PREV_CONFIG_TICKS` = convergence_delay / tick_interval
pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;
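
// Illustrative arithmetic for the derivation above (not from the original
// file): ~300 seconds of expected network convergence divided by one-minute
// ticks yields 5 ticks.
#[cfg(test)]
#[test]
fn expire_prev_config_ticks_sketch() {
	assert_eq!(300 / 60, EXPIRE_PREV_CONFIG_TICKS);
}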
/// The number of ticks that may elapse while we're waiting for a response to a
/// [`msgs::RevokeAndACK`] or [`msgs::ChannelReestablish`] message before we attempt to disconnect
/// them.
///
/// See [`ChannelContext::sent_message_awaiting_response`] for more information.
pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;
/// The number of ticks that may elapse while we're waiting for an unfunded outbound/inbound channel
/// to be promoted to a [`Channel`] since the unfunded channel was created. An unfunded channel
/// exceeding this age limit will be force-closed and purged from memory.
pub(crate) const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;

/// Number of blocks needed for an output from a coinbase transaction to be spendable.
pub(crate) const COINBASE_MATURITY: u32 = 100;
struct PendingChannelMonitorUpdate {
	update: ChannelMonitorUpdate,
}

impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
	(0, update, required),
});
/// The `ChannelPhase` enum describes the current phase in life of a lightning channel with each of
/// its variants containing an appropriate channel struct.
pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
	UnfundedOutboundV1(OutboundV1Channel<SP>),
	UnfundedInboundV1(InboundV1Channel<SP>),
	Funded(Channel<SP>),
}
impl<'a, SP: Deref> ChannelPhase<SP> where
	SP::Target: SignerProvider,
	<SP::Target as SignerProvider>::EcdsaSigner: ChannelSigner,
{
	pub fn context(&'a self) -> &'a ChannelContext<SP> {
		match self {
			ChannelPhase::Funded(chan) => &chan.context,
			ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
			ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
		}
	}

	pub fn context_mut(&'a mut self) -> &'a mut ChannelContext<SP> {
		match self {
			ChannelPhase::Funded(ref mut chan) => &mut chan.context,
			ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
			ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
		}
	}
}
/// Contains all state common to unfunded inbound/outbound channels.
pub(super) struct UnfundedChannelContext {
	/// A counter tracking how many ticks have elapsed since this unfunded channel was
	/// created. If the peer has yet to respond after this reaches
	/// `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS`, the channel will be force-closed and purged from memory.
	///
	/// This is so that we don't keep channels around that haven't progressed to a funded state
	/// in a timely manner.
	unfunded_channel_age_ticks: usize,
}
impl UnfundedChannelContext {
	/// Determines whether we should force-close and purge this unfunded channel from memory due to it
	/// having reached the unfunded channel age limit.
	///
	/// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
	pub fn should_expire_unfunded_channel(&mut self) -> bool {
		self.unfunded_channel_age_ticks += 1;
		self.unfunded_channel_age_ticks >= UNFUNDED_CHANNEL_AGE_LIMIT_TICKS
	}
}
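
// Illustrative usage, assuming one call per timer tick as documented above:
// the expiry check first returns true exactly at
// `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS`. This sketch is not from the original file.
#[cfg(test)]
#[test]
fn unfunded_channel_expiry_sketch() {
	let mut ctx = UnfundedChannelContext { unfunded_channel_age_ticks: 0 };
	for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS - 1 {
		assert!(!ctx.should_expire_unfunded_channel());
	}
	assert!(ctx.should_expire_unfunded_channel());
}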
/// Contains everything about the channel including state, and various flags.
pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
	config: LegacyChannelConfig,

	// Track the previous `ChannelConfig` so that we can continue forwarding HTLCs that were
	// constructed using it. The second element in the tuple corresponds to the number of ticks that
	// have elapsed since the update occurred.
	prev_config: Option<(ChannelConfig, usize)>,

	inbound_handshake_limits_override: Option<ChannelHandshakeLimits>,

	user_id: u128,

	/// The current channel ID.
	channel_id: ChannelId,
	/// The temporary channel ID used during channel setup. Value kept even after transitioning to a final channel ID.
	/// Will be `None` for channels created prior to 0.0.115.
	temporary_channel_id: Option<ChannelId>,
	channel_state: ChannelState,
	// When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
	// our peer. However, we want to make sure they received it, or else rebroadcast it when we
	// reconnect.
	// We do so here, see `AnnouncementSigsSent` for more details on the state(s).
	// Note that a number of our tests were written prior to the behavior here which retransmits
	// AnnouncementSignatures until after an RAA completes, so the behavior is short-circuited in
	// our tests.
	#[cfg(any(test, feature = "_test_utils"))]
	pub(crate) announcement_sigs_state: AnnouncementSigsState,
	#[cfg(not(any(test, feature = "_test_utils")))]
	announcement_sigs_state: AnnouncementSigsState,
	secp_ctx: Secp256k1<secp256k1::All>,
	channel_value_satoshis: u64,

	latest_monitor_update_id: u64,

	holder_signer: ChannelSignerType<SP>,
	shutdown_scriptpubkey: Option<ShutdownScript>,
	destination_script: ScriptBuf,

	// Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction
	// generation start at 0 and count up...this simplifies some parts of implementation at the
	// cost of others, but should really just be changed.

	cur_holder_commitment_transaction_number: u64,
	cur_counterparty_commitment_transaction_number: u64,
	value_to_self_msat: u64, // Excluding all pending_htlcs, fees, and anchor outputs
	pending_inbound_htlcs: Vec<InboundHTLCOutput>,
	pending_outbound_htlcs: Vec<OutboundHTLCOutput>,
	holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,
	/// When resending CS/RAA messages on channel monitor restoration or on reconnect, we always
	/// need to ensure we resend them in the order we originally generated them. Note that because
	/// there can only ever be one in-flight CS and/or one in-flight RAA at any time, it is
	/// sufficient to simply set this to the opposite of any message we are generating as we
	/// generate it. ie when we generate a CS, we set this to RAAFirst as, if there is a pending
	/// in-flight RAA to resend, it will have been the first thing we generated, and thus we should
	/// send the RAA first.
	resend_order: RAACommitmentOrder,
	monitor_pending_channel_ready: bool,
	monitor_pending_revoke_and_ack: bool,
	monitor_pending_commitment_signed: bool,

	// TODO: If a channel is drop'd, we don't know whether the `ChannelMonitor` is ultimately
	// responsible for some of the HTLCs here or not - we don't know whether the update in question
	// completed or not. We currently ignore these fields entirely when force-closing a channel,
	// but need to handle this somehow or we run the risk of losing HTLCs!
	monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
	monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
	monitor_pending_finalized_fulfills: Vec<HTLCSource>,
	/// If we went to send a commitment update (ie some messages then [`msgs::CommitmentSigned`])
	/// but our signer (initially) refused to give us a signature, we should retry at some point in
	/// the future when the signer indicates it may have a signature for us.
	///
	/// This flag is set in such a case. Note that we don't need to persist this as we'll end up
	/// setting it again as a side-effect of [`Channel::channel_reestablish`].
	signer_pending_commitment_update: bool,
	/// Similar to [`Self::signer_pending_commitment_update`] but we're waiting to send either a
	/// [`msgs::FundingCreated`] or [`msgs::FundingSigned`] depending on if this channel is
	/// outbound or inbound.
	signer_pending_funding: bool,
	// pending_update_fee is filled when sending and receiving update_fee.
	//
	// Because it follows the same commitment flow as HTLCs, `FeeUpdateState` is either `Outbound`
	// or matches a subset of the `InboundHTLCOutput` variants. It is then updated/used when
	// generating new commitment transactions with exactly the same criteria as inbound/outbound
	// HTLCs with similar state.
	pending_update_fee: Option<(u32, FeeUpdateState)>,
	// If a `send_update_fee()` call is made with ChannelState::AwaitingRemoteRevoke set, we place
	// it here instead of `pending_update_fee` in the same way as we place outbound HTLC updates in
	// `holding_cell_htlc_updates` instead of `pending_outbound_htlcs`. It is released into
	// `pending_update_fee` with the same criteria as outbound HTLC updates but can be updated by
	// further `send_update_fee` calls, dropping the previous holding cell update entirely.
	holding_cell_update_fee: Option<u32>,
	next_holder_htlc_id: u64,
	next_counterparty_htlc_id: u64,
	feerate_per_kw: u32,
	/// The timestamp set on our latest `channel_update` message for this channel. It is updated
	/// when the channel is updated in ways which may impact the `channel_update` message or when a
	/// new block is received, ensuring it's always at least moderately close to the current real
	/// time.
	update_time_counter: u32,
	#[cfg(debug_assertions)]
	/// Max to_local and to_remote outputs in a locally-generated commitment transaction
	holder_max_commitment_tx_output: Mutex<(u64, u64)>,
	#[cfg(debug_assertions)]
	/// Max to_local and to_remote outputs in a remote-generated commitment transaction
	counterparty_max_commitment_tx_output: Mutex<(u64, u64)>,
	last_sent_closing_fee: Option<(u64, Signature)>, // (fee, holder_sig)
	target_closing_feerate_sats_per_kw: Option<u32>,

	/// If our counterparty sent us a closing_signed while we were waiting for a `ChannelMonitor`
	/// update, we need to delay processing it until later. We do that here by simply storing the
	/// closing_signed message and handling it in `maybe_propose_closing_signed`.
	pending_counterparty_closing_signed: Option<msgs::ClosingSigned>,
	/// The minimum and maximum absolute fee, in satoshis, we are willing to place on the closing
	/// transaction. These are set once we reach `closing_negotiation_ready`.
	#[cfg(test)]
	pub(crate) closing_fee_limits: Option<(u64, u64)>,
	#[cfg(not(test))]
	closing_fee_limits: Option<(u64, u64)>,
	/// If we remove an HTLC (or fee update), commit, and receive our counterparty's
	/// `revoke_and_ack`, we remove all knowledge of said HTLC (or fee update). However, the latest
	/// local commitment transaction that we can broadcast still contains the HTLC (or old fee)
	/// until we receive a further `commitment_signed`. Thus we are not eligible for initiating the
	/// `closing_signed` negotiation if we're expecting a counterparty `commitment_signed`.
	///
	/// To ensure we don't send a `closing_signed` too early, we track this state here, waiting
	/// until we see a `commitment_signed` before doing so.
	///
	/// We don't bother to persist this - we anticipate this state won't last longer than a few
	/// milliseconds, so any accidental force-closes here should be exceedingly rare.
	expecting_peer_commitment_signed: bool,
	/// The hash of the block in which the funding transaction was included.
	funding_tx_confirmed_in: Option<BlockHash>,
	funding_tx_confirmation_height: u32,
	short_channel_id: Option<u64>,
	/// Either the height at which this channel was created or the height at which it was last
	/// serialized if it was serialized by versions prior to 0.0.103.
	/// We use this to close if funding is never broadcasted.
	channel_creation_height: u32,
	counterparty_dust_limit_satoshis: u64,

	#[cfg(test)]
	pub(super) holder_dust_limit_satoshis: u64,
	#[cfg(not(test))]
	holder_dust_limit_satoshis: u64,

	#[cfg(test)]
	pub(super) counterparty_max_htlc_value_in_flight_msat: u64,
	#[cfg(not(test))]
	counterparty_max_htlc_value_in_flight_msat: u64,

	#[cfg(test)]
	pub(super) holder_max_htlc_value_in_flight_msat: u64,
	#[cfg(not(test))]
	holder_max_htlc_value_in_flight_msat: u64,

	/// Minimum channel reserve for self to maintain - set by them.
	counterparty_selected_channel_reserve_satoshis: Option<u64>,

	#[cfg(test)]
	pub(super) holder_selected_channel_reserve_satoshis: u64,
	#[cfg(not(test))]
	holder_selected_channel_reserve_satoshis: u64,

	counterparty_htlc_minimum_msat: u64,
	holder_htlc_minimum_msat: u64,
	#[cfg(test)]
	pub counterparty_max_accepted_htlcs: u16,
	#[cfg(not(test))]
	counterparty_max_accepted_htlcs: u16,
	holder_max_accepted_htlcs: u16,
	minimum_depth: Option<u32>,
	counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,

	pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
	funding_transaction: Option<Transaction>,
	is_batch_funding: Option<()>,

	counterparty_cur_commitment_point: Option<PublicKey>,
	counterparty_prev_commitment_point: Option<PublicKey>,
	counterparty_node_id: PublicKey,

	counterparty_shutdown_scriptpubkey: Option<ScriptBuf>,

	commitment_secrets: CounterpartyCommitmentSecrets,
	channel_update_status: ChannelUpdateStatus,
	/// Once we reach `closing_negotiation_ready`, we set this, indicating if closing_signed does
	/// not complete within a single timer tick (one minute), we should force-close the channel.
	/// This prevents us from keeping unusable channels around forever if our counterparty wishes
	/// to delay things indefinitely.
	///
	/// Note that this field is reset to false on deserialization to give us a chance to connect to
	/// our peer and start the closing_signed negotiation fresh.
	closing_signed_in_flight: bool,
	/// Our counterparty's channel_announcement signatures provided in announcement_signatures.
	/// This can be used to rebroadcast the channel_announcement message later.
	announcement_sigs: Option<(Signature, Signature)>,

	// We save these values so we can make sure `next_local_commit_tx_fee_msat` and
	// `next_remote_commit_tx_fee_msat` properly predict what the next commitment transaction fee will
	// be, by comparing the cached values to the fee of the transaction generated by
	// `build_commitment_transaction`.
	#[cfg(any(test, fuzzing))]
	next_local_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
	#[cfg(any(test, fuzzing))]
	next_remote_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
	/// lnd has a long-standing bug where, upon reconnection, if the channel is not yet confirmed
	/// they will not send a channel_reestablish until the channel locks in. Then, they will send a
	/// channel_ready *before* sending the channel_reestablish (which is clearly a violation of
	/// the BOLT specs). We copy c-lightning's workaround here and simply store the channel_ready
	/// message until we receive a channel_reestablish.
	///
	/// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
	pub workaround_lnd_bug_4006: Option<msgs::ChannelReady>,
	/// An option set when we wish to track how many ticks have elapsed while waiting for a response
	/// from our counterparty after sending a message. If the peer has yet to respond after reaching
	/// `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`, a reconnection should be attempted to try to
	/// unblock the state machine.
	///
	/// This behavior is mostly motivated by a lnd bug in which we don't receive a message we expect
	/// to in a timely manner, which may lead to channels becoming unusable and/or force-closed. An
	/// example of such can be found at <https://github.com/lightningnetwork/lnd/issues/7682>.
	///
	/// This is currently only used when waiting for a [`msgs::ChannelReestablish`] or
	/// [`msgs::RevokeAndACK`] message from the counterparty.
	sent_message_awaiting_response: Option<usize>,
	#[cfg(any(test, fuzzing))]
	// When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
	// corresponding HTLC on the inbound path. If, then, the outbound path channel is
	// disconnected and reconnected (before we've exchanged commitment_signed and revoke_and_ack
	// messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This
	// is fine, but as a sanity check in our failure to generate the second claim, we check here
	// that the original was a claim, and that we aren't now trying to fulfill a failed HTLC.
	historical_inbound_htlc_fulfills: HashSet<u64>,
	/// This channel's type, as negotiated during channel open
	channel_type: ChannelTypeFeatures,

	// Our counterparty can offer us SCID aliases which they will map to this channel when routing
	// outbound payments. These can be used in invoice route hints to avoid explicitly revealing
	// the channel's funding UTXO.
	//
	// We also use this when sending our peer a channel_update that isn't to be broadcasted
	// publicly - allowing them to re-use their map of SCID -> channel for channel_update ->
	// associated channel mapping.
	//
	// We only bother storing the most recent SCID alias at any time, though our counterparty has
	// to store all of them.
	latest_inbound_scid_alias: Option<u64>,
	// We always offer our counterparty a static SCID alias, which we recognize as for this channel
	// if we see it in HTLC forwarding instructions. We don't bother rotating the alias given we
	// don't currently support node id aliases and eventually privacy should be provided with
	// blinded paths instead of simple scid+node_id aliases.
	outbound_scid_alias: u64,

	// We track whether we already emitted a `ChannelPending` event.
	channel_pending_event_emitted: bool,

	// We track whether we already emitted a `ChannelReady` event.
	channel_ready_event_emitted: bool,

	/// The unique identifier used to re-derive the private key material for the channel through
	/// [`SignerProvider::derive_channel_signer`].
	channel_keys_id: [u8; 32],

	/// If we can't release a [`ChannelMonitorUpdate`] until some external action completes, we
	/// store it here and only release it to the `ChannelManager` once it asks for it.
	blocked_monitor_updates: Vec<PendingChannelMonitorUpdate>,
}
impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
	/// Allowed in any state (including after shutdown)
	pub fn get_update_time_counter(&self) -> u32 {
		self.update_time_counter
	}

	pub fn get_latest_monitor_update_id(&self) -> u64 {
		self.latest_monitor_update_id
	}

	pub fn should_announce(&self) -> bool {
		self.config.announced_channel
	}

	pub fn is_outbound(&self) -> bool {
		self.channel_transaction_parameters.is_outbound_from_holder
	}
	/// Gets the fee we'd want to charge for adding an HTLC output to this Channel
	/// Allowed in any state (including after shutdown)
	pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 {
		self.config.options.forwarding_fee_base_msat
	}

	/// Returns true if we've ever received a message from the remote end for this Channel
	pub fn have_received_message(&self) -> bool {
		self.channel_state > ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT)
	}

	/// Returns true if this channel is fully established and not known to be closing.
	/// Allowed in any state (including after shutdown)
	pub fn is_usable(&self) -> bool {
		matches!(self.channel_state, ChannelState::ChannelReady(_)) &&
			!self.channel_state.is_local_shutdown_sent() &&
			!self.channel_state.is_remote_shutdown_sent() &&
			!self.monitor_pending_channel_ready
	}
1276 /// shutdown state returns the state of the channel in its various stages of shutdown
1277 pub fn shutdown_state(&self) -> ChannelShutdownState {
1278 match self.channel_state {
1279 ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_) =>
1280 if self.channel_state.is_local_shutdown_sent() && !self.channel_state.is_remote_shutdown_sent() {
1281 ChannelShutdownState::ShutdownInitiated
1282 } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && !self.closing_negotiation_ready() {
1283 ChannelShutdownState::ResolvingHTLCs
1284 } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && self.closing_negotiation_ready() {
1285 ChannelShutdownState::NegotiatingClosingFee
				} else {
					ChannelShutdownState::NotShuttingDown
				},
1289 ChannelState::ShutdownComplete => ChannelShutdownState::ShutdownComplete,
1290 _ => ChannelShutdownState::NotShuttingDown,
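		// Taken together, a cooperative close typically progresses:
		// NotShuttingDown -> ShutdownInitiated (we sent `shutdown`, they have not) ->
		// ResolvingHTLCs (`shutdown` exchanged, HTLCs or a fee update still pending) ->
		// NegotiatingClosingFee (nothing left pending, `closing_signed` exchange) ->
		// ShutdownComplete.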
1294 fn closing_negotiation_ready(&self) -> bool {
1295 let is_ready_to_close = match self.channel_state {
1296 ChannelState::AwaitingChannelReady(flags) =>
1297 flags & FundedStateFlags::ALL == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
1298 ChannelState::ChannelReady(flags) =>
				flags == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
			_ => false,
		};
1302 self.pending_inbound_htlcs.is_empty() &&
1303 self.pending_outbound_htlcs.is_empty() &&
			self.pending_update_fee.is_none() &&
			is_ready_to_close
	}
1308 /// Returns true if this channel is currently available for use. This is a superset of
1309 /// is_usable() and considers things like the channel being temporarily disabled.
1310 /// Allowed in any state (including after shutdown)
1311 pub fn is_live(&self) -> bool {
1312 self.is_usable() && !self.channel_state.is_peer_disconnected()
1315 // Public utilities:
	pub fn channel_id(&self) -> ChannelId {
		self.channel_id
	}
1321 // Return the `temporary_channel_id` used during channel establishment.
1323 // Will return `None` for channels created prior to LDK version 0.0.115.
1324 pub fn temporary_channel_id(&self) -> Option<ChannelId> {
1325 self.temporary_channel_id
	pub fn minimum_depth(&self) -> Option<u32> {
		self.minimum_depth
	}
1332 /// Gets the "user_id" value passed into the construction of this channel. It has no special
1333 /// meaning and exists only to allow users to have a persistent identifier of a channel.
	pub fn get_user_id(&self) -> u128 {
		self.user_id
	}
1338 /// Gets the channel's type
	pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
		&self.channel_type
	}
1343 /// Gets the channel's `short_channel_id`.
1345 /// Will return `None` if the channel hasn't been confirmed yet.
1346 pub fn get_short_channel_id(&self) -> Option<u64> {
1347 self.short_channel_id
1350 /// Allowed in any state (including after shutdown)
1351 pub fn latest_inbound_scid_alias(&self) -> Option<u64> {
1352 self.latest_inbound_scid_alias
1355 /// Allowed in any state (including after shutdown)
1356 pub fn outbound_scid_alias(&self) -> u64 {
1357 self.outbound_scid_alias
1360 /// Returns the holder signer for this channel.
1362 pub fn get_signer(&self) -> &ChannelSignerType<SP> {
1363 return &self.holder_signer
	/// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
	/// indicating we were written by LDK prior to 0.0.106 (which did not set outbound SCID
	/// aliases), or prior to any channel actions during `Channel` initialization.
1369 pub fn set_outbound_scid_alias(&mut self, outbound_scid_alias: u64) {
1370 debug_assert_eq!(self.outbound_scid_alias, 0);
1371 self.outbound_scid_alias = outbound_scid_alias;
1374 /// Returns the funding_txo we either got from our peer, or were given by
1375 /// get_funding_created.
1376 pub fn get_funding_txo(&self) -> Option<OutPoint> {
1377 self.channel_transaction_parameters.funding_outpoint
1380 /// Returns the height in which our funding transaction was confirmed.
1381 pub fn get_funding_tx_confirmation_height(&self) -> Option<u32> {
1382 let conf_height = self.funding_tx_confirmation_height;
		if conf_height > 0 {
			Some(conf_height)
		} else {
			None
		}
	}
1390 /// Returns the block hash in which our funding transaction was confirmed.
1391 pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
1392 self.funding_tx_confirmed_in
1395 /// Returns the current number of confirmations on the funding transaction.
1396 pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
1397 if self.funding_tx_confirmation_height == 0 {
1398 // We either haven't seen any confirmation yet, or observed a reorg.
1402 height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1)
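		// For example, with `funding_tx_confirmation_height` of 100 and `height` of 105
		// this returns 105 - 100 + 1 = 6, while a `height` below the confirmation height
		// (as seen during a reorg) makes `checked_sub` return `None`, reporting 0.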
1405 fn get_holder_selected_contest_delay(&self) -> u16 {
1406 self.channel_transaction_parameters.holder_selected_contest_delay
1409 fn get_holder_pubkeys(&self) -> &ChannelPublicKeys {
1410 &self.channel_transaction_parameters.holder_pubkeys
1413 pub fn get_counterparty_selected_contest_delay(&self) -> Option<u16> {
1414 self.channel_transaction_parameters.counterparty_parameters
1415 .as_ref().map(|params| params.selected_contest_delay)
1418 fn get_counterparty_pubkeys(&self) -> &ChannelPublicKeys {
1419 &self.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys
1422 /// Allowed in any state (including after shutdown)
1423 pub fn get_counterparty_node_id(&self) -> PublicKey {
1424 self.counterparty_node_id
1427 /// Allowed in any state (including after shutdown)
1428 pub fn get_holder_htlc_minimum_msat(&self) -> u64 {
1429 self.holder_htlc_minimum_msat
	/// Allowed in any state (including after shutdown), but will return `None` before TheirInitSent
1433 pub fn get_holder_htlc_maximum_msat(&self) -> Option<u64> {
1434 self.get_htlc_maximum_msat(self.holder_max_htlc_value_in_flight_msat)
1437 /// Allowed in any state (including after shutdown)
1438 pub fn get_announced_htlc_max_msat(&self) -> u64 {
		// Upper bound by capacity. We make it a bit less than full capacity to prevent attempts
		// to use full capacity. This is an effort to reduce routing failures, because in many cases
		// the channel might have been used to route very small values (either by honest users or as a DoS).
		cmp::min(
			self.channel_value_satoshis * 1000 * 9 / 10,
1445 self.counterparty_max_htlc_value_in_flight_msat
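		// For example (illustrative values): a 1_000_000 sat channel is capped at
		// 900_000_000 msat here, and if the counterparty's max-in-flight is only
		// 500_000_000 msat, that lower value is what we announce.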
1449 /// Allowed in any state (including after shutdown)
1450 pub fn get_counterparty_htlc_minimum_msat(&self) -> u64 {
1451 self.counterparty_htlc_minimum_msat
	/// Allowed in any state (including after shutdown), but will return `None` before TheirInitSent
1455 pub fn get_counterparty_htlc_maximum_msat(&self) -> Option<u64> {
1456 self.get_htlc_maximum_msat(self.counterparty_max_htlc_value_in_flight_msat)
1459 fn get_htlc_maximum_msat(&self, party_max_htlc_value_in_flight_msat: u64) -> Option<u64> {
1460 self.counterparty_selected_channel_reserve_satoshis.map(|counterparty_reserve| {
1461 let holder_reserve = self.holder_selected_channel_reserve_satoshis;
			cmp::min(
				(self.channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000,
1464 party_max_htlc_value_in_flight_msat
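		// For example (illustrative values): on a 1_000_000 sat channel where each side
		// maintains a 10_000 sat reserve, at most 980_000_000 msat can be in flight,
		// further capped by the given party's max-in-flight limit.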
1469 pub fn get_value_satoshis(&self) -> u64 {
1470 self.channel_value_satoshis
1473 pub fn get_fee_proportional_millionths(&self) -> u32 {
1474 self.config.options.forwarding_fee_proportional_millionths
1477 pub fn get_cltv_expiry_delta(&self) -> u16 {
1478 cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
1481 pub fn get_max_dust_htlc_exposure_msat<F: Deref>(&self,
1482 fee_estimator: &LowerBoundedFeeEstimator<F>) -> u64
1483 where F::Target: FeeEstimator
1485 match self.config.options.max_dust_htlc_exposure {
1486 MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => {
1487 let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(
1488 ConfirmationTarget::OnChainSweep) as u64;
1489 feerate_per_kw.saturating_mul(multiplier)
1491 MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
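		// As a rough illustration (hypothetical configuration): a `FeeRateMultiplier`
		// of 10_000 with the estimator returning 2_500 sat/kW for `OnChainSweep`
		// yields a dust exposure cap of 2_500 * 10_000 = 25_000_000 msat.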
1495 /// Returns the previous [`ChannelConfig`] applied to this channel, if any.
1496 pub fn prev_config(&self) -> Option<ChannelConfig> {
1497 self.prev_config.map(|prev_config| prev_config.0)
1500 // Checks whether we should emit a `ChannelPending` event.
1501 pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
1502 self.is_funding_broadcast() && !self.channel_pending_event_emitted
1505 // Returns whether we already emitted a `ChannelPending` event.
1506 pub(crate) fn channel_pending_event_emitted(&self) -> bool {
1507 self.channel_pending_event_emitted
1510 // Remembers that we already emitted a `ChannelPending` event.
1511 pub(crate) fn set_channel_pending_event_emitted(&mut self) {
1512 self.channel_pending_event_emitted = true;
1515 // Checks whether we should emit a `ChannelReady` event.
1516 pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool {
1517 self.is_usable() && !self.channel_ready_event_emitted
1520 // Remembers that we already emitted a `ChannelReady` event.
1521 pub(crate) fn set_channel_ready_event_emitted(&mut self) {
1522 self.channel_ready_event_emitted = true;
1525 /// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once
1526 /// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will
1527 /// no longer be considered when forwarding HTLCs.
1528 pub fn maybe_expire_prev_config(&mut self) {
		if self.prev_config.is_none() {
			return;
		}
		let prev_config = self.prev_config.as_mut().unwrap();
		prev_config.1 += 1;
		if prev_config.1 == EXPIRE_PREV_CONFIG_TICKS {
1535 self.prev_config = None;
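	// For example, if `EXPIRE_PREV_CONFIG_TICKS` were 5 and ticks arrive roughly once
	// per minute, a superseded config would keep matching forwarded HTLCs for about
	// five minutes before being dropped here.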
1539 /// Returns the current [`ChannelConfig`] applied to the channel.
	pub fn config(&self) -> ChannelConfig {
		self.config.options
	}
1544 /// Updates the channel's config. A bool is returned indicating whether the config update
1545 /// applied resulted in a new ChannelUpdate message.
1546 pub fn update_config(&mut self, config: &ChannelConfig) -> bool {
1547 let did_channel_update =
1548 self.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths ||
1549 self.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat ||
1550 self.config.options.cltv_expiry_delta != config.cltv_expiry_delta;
1551 if did_channel_update {
1552 self.prev_config = Some((self.config.options, 0));
1553 // Update the counter, which backs the ChannelUpdate timestamp, to allow the relay
1554 // policy change to propagate throughout the network.
1555 self.update_time_counter += 1;
1557 self.config.options = *config;
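	// A hypothetical caller sketch (names illustrative):
	//   if chan.context.update_config(&new_options) {
	//       // re-sign and distribute a fresh `channel_update` with the new policy
	//   }
	// so HTLCs forwarded under the old policy keep working until the prev config expires.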
1561 /// Returns true if funding_signed was sent/received and the
1562 /// funding transaction has been broadcast if necessary.
1563 pub fn is_funding_broadcast(&self) -> bool {
1564 !self.channel_state.is_pre_funded_state() &&
1565 !matches!(self.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH))
1568 /// Transaction nomenclature is somewhat confusing here as there are many different cases - a
1569 /// transaction is referred to as "a's transaction" implying that a will be able to broadcast
1570 /// the transaction. Thus, b will generally be sending a signature over such a transaction to
1571 /// a, and a can revoke the transaction by providing b the relevant per_commitment_secret. As
	/// such, a transaction is generally the result of b increasing the amount paid to a (or adding
	/// an HTLC to it).
1574 /// @local is used only to convert relevant internal structures which refer to remote vs local
1575 /// to decide value of outputs and direction of HTLCs.
1576 /// @generated_by_local is used to determine *which* HTLCs to include - noting that the HTLC
1577 /// state may indicate that one peer has informed the other that they'd like to add an HTLC but
1578 /// have not yet committed it. Such HTLCs will only be included in transactions which are being
1579 /// generated by the peer which proposed adding the HTLCs, and thus we need to understand both
1580 /// which peer generated this transaction and "to whom" this transaction flows.
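	/// Roughly: `local` selects whose transaction we are building (ours if true, theirs
	/// if false), while `generated_by_local` selects which not-yet-committed updates to
	/// include (ours if true, theirs if false). For example, when handling a counterparty
	/// `commitment_signed` we build with `local == true, generated_by_local == false`;
	/// when sending one, we build with `local == false, generated_by_local == true`.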
1582 fn build_commitment_transaction<L: Deref>(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> CommitmentStats
1583 where L::Target: Logger
1585 let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new();
1586 let num_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len();
1587 let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs);
1589 let broadcaster_dust_limit_satoshis = if local { self.holder_dust_limit_satoshis } else { self.counterparty_dust_limit_satoshis };
1590 let mut remote_htlc_total_msat = 0;
1591 let mut local_htlc_total_msat = 0;
1592 let mut value_to_self_msat_offset = 0;
1594 let mut feerate_per_kw = self.feerate_per_kw;
1595 if let Some((feerate, update_state)) = self.pending_update_fee {
1596 if match update_state {
1597 // Note that these match the inclusion criteria when scanning
1598 // pending_inbound_htlcs below.
1599 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); !generated_by_local },
1600 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); !generated_by_local },
1601 FeeUpdateState::Outbound => { assert!(self.is_outbound()); generated_by_local },
1603 feerate_per_kw = feerate;
1607 log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...",
1608 commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number),
			get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()),
			&self.channel_id(),
1611 if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
1613 macro_rules! get_htlc_in_commitment {
1614 ($htlc: expr, $offered: expr) => {
				HTLCOutputInCommitment {
					offered: $offered,
1617 amount_msat: $htlc.amount_msat,
1618 cltv_expiry: $htlc.cltv_expiry,
1619 payment_hash: $htlc.payment_hash,
1620 transaction_output_index: None
1625 macro_rules! add_htlc_output {
1626 ($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => {
1627 if $outbound == local { // "offered HTLC output"
1628 let htlc_in_tx = get_htlc_in_commitment!($htlc, true);
					let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
						0
					} else {
1632 feerate_per_kw as u64 * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
1634 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1635 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1636 included_non_dust_htlcs.push((htlc_in_tx, $source));
1638 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1639 included_dust_htlcs.push((htlc_in_tx, $source));
1642 let htlc_in_tx = get_htlc_in_commitment!($htlc, false);
					let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
						0
					} else {
1646 feerate_per_kw as u64 * htlc_success_tx_weight(self.get_channel_type()) / 1000
1648 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1649 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1650 included_non_dust_htlcs.push((htlc_in_tx, $source));
1652 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1653 included_dust_htlcs.push((htlc_in_tx, $source));
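			// To make the dust cutoff concrete (illustrative, non-anchor figures): at
			// 2_500 sat/kW with an HTLC-timeout weight of 663 WU, the second-stage fee is
			// 2_500 * 663 / 1000 = 1_657 sat, so with a 546 sat broadcaster dust limit an
			// offered HTLC needs at least 2_203_000 msat to appear in the commitment tx.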
1659 let mut inbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
1661 for ref htlc in self.pending_inbound_htlcs.iter() {
1662 let (include, state_name) = match htlc.state {
1663 InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"),
1664 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => (!generated_by_local, "AwaitingRemoteRevokeToAnnounce"),
1665 InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => (true, "AwaitingAnnouncedRemoteRevoke"),
1666 InboundHTLCState::Committed => (true, "Committed"),
1667 InboundHTLCState::LocalRemoved(_) => (!generated_by_local, "LocalRemoved"),
1671 add_htlc_output!(htlc, false, None, state_name);
1672 remote_htlc_total_msat += htlc.amount_msat;
1674 log_trace!(logger, " ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1676 &InboundHTLCState::LocalRemoved(ref reason) => {
1677 if generated_by_local {
1678 if let &InboundHTLCRemovalReason::Fulfill(preimage) = reason {
1679 inbound_htlc_preimages.push(preimage);
1680 value_to_self_msat_offset += htlc.amount_msat as i64;
1690 let mut outbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
1692 for ref htlc in self.pending_outbound_htlcs.iter() {
1693 let (include, state_name) = match htlc.state {
1694 OutboundHTLCState::LocalAnnounced(_) => (generated_by_local, "LocalAnnounced"),
1695 OutboundHTLCState::Committed => (true, "Committed"),
1696 OutboundHTLCState::RemoteRemoved(_) => (generated_by_local, "RemoteRemoved"),
1697 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) => (generated_by_local, "AwaitingRemoteRevokeToRemove"),
1698 OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) => (false, "AwaitingRemovedRemoteRevoke"),
1701 let preimage_opt = match htlc.state {
1702 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(p)) => p,
1703 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(p)) => p,
1704 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(p)) => p,
1708 if let Some(preimage) = preimage_opt {
1709 outbound_htlc_preimages.push(preimage);
1713 add_htlc_output!(htlc, true, Some(&htlc.source), state_name);
1714 local_htlc_total_msat += htlc.amount_msat;
1716 log_trace!(logger, " ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1718 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_))|OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => {
1719 value_to_self_msat_offset -= htlc.amount_msat as i64;
1721 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(_)) => {
1722 if !generated_by_local {
1723 value_to_self_msat_offset -= htlc.amount_msat as i64;
1731 let value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
1732 assert!(value_to_self_msat >= 0);
1733 // Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie
1734 // AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
1735 // "violate" their reserve value by couting those against it. Thus, we have to convert
1736 // everything to i64 before subtracting as otherwise we can overflow.
1737 let value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
1738 assert!(value_to_remote_msat >= 0);
1740 #[cfg(debug_assertions)]
1742 // Make sure that the to_self/to_remote is always either past the appropriate
1743 // channel_reserve *or* it is making progress towards it.
1744 let mut broadcaster_max_commitment_tx_output = if generated_by_local {
1745 self.holder_max_commitment_tx_output.lock().unwrap()
1747 self.counterparty_max_commitment_tx_output.lock().unwrap()
1749 debug_assert!(broadcaster_max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= self.counterparty_selected_channel_reserve_satoshis.unwrap() as i64);
1750 broadcaster_max_commitment_tx_output.0 = cmp::max(broadcaster_max_commitment_tx_output.0, value_to_self_msat as u64);
1751 debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= self.holder_selected_channel_reserve_satoshis as i64);
1752 broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64);
1755 let total_fee_sat = commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), &self.channel_transaction_parameters.channel_type_features);
1756 let anchors_val = if self.channel_transaction_parameters.channel_type_features.supports_anchors_zero_fee_htlc_tx() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64;
1757 let (value_to_self, value_to_remote) = if self.is_outbound() {
1758 (value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000)
1760 (value_to_self_msat / 1000, value_to_remote_msat / 1000 - anchors_val - total_fee_sat as i64)
1763 let mut value_to_a = if local { value_to_self } else { value_to_remote };
1764 let mut value_to_b = if local { value_to_remote } else { value_to_self };
1765 let (funding_pubkey_a, funding_pubkey_b) = if local {
1766 (self.get_holder_pubkeys().funding_pubkey, self.get_counterparty_pubkeys().funding_pubkey)
1768 (self.get_counterparty_pubkeys().funding_pubkey, self.get_holder_pubkeys().funding_pubkey)
1771 if value_to_a >= (broadcaster_dust_limit_satoshis as i64) {
1772 log_trace!(logger, " ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a);
1777 if value_to_b >= (broadcaster_dust_limit_satoshis as i64) {
1778 log_trace!(logger, " ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b);
1783 let num_nondust_htlcs = included_non_dust_htlcs.len();
1785 let channel_parameters =
1786 if local { self.channel_transaction_parameters.as_holder_broadcastable() }
1787 else { self.channel_transaction_parameters.as_counterparty_broadcastable() };
1788 let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number,
1795 &mut included_non_dust_htlcs,
1798 let mut htlcs_included = included_non_dust_htlcs;
1799 // The unwrap is safe, because all non-dust HTLCs have been assigned an output index
1800 htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
1801 htlcs_included.append(&mut included_dust_htlcs);
1809 local_balance_msat: value_to_self_msat as u64,
1810 remote_balance_msat: value_to_remote_msat as u64,
1811 inbound_htlc_preimages,
1812 outbound_htlc_preimages,
1817 /// Creates a set of keys for build_commitment_transaction to generate a transaction which our
1818 /// counterparty will sign (ie DO NOT send signatures over a transaction created by this to
1819 /// our counterparty!)
1820 /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
	/// TODO: Ideally this would be enforced with a compile-time (type-level) check.
1822 fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
1823 let per_commitment_point = self.holder_signer.as_ref().get_per_commitment_point(commitment_number, &self.secp_ctx);
1824 let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
1825 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1826 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1828 TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint)
1832 /// Creates a set of keys for build_commitment_transaction to generate a transaction which we
1833 /// will sign and send to our counterparty.
1834 /// If an Err is returned, it is a ChannelError::Close (for get_funding_created)
1835 fn build_remote_transaction_keys(&self) -> TxCreationKeys {
1836 let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
1837 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1838 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1840 TxCreationKeys::derive_new(&self.secp_ctx, &self.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint)
1843 /// Gets the redeemscript for the funding transaction output (ie the funding transaction output
1844 /// pays to get_funding_redeemscript().to_v0_p2wsh()).
1845 /// Panics if called before accept_channel/InboundV1Channel::new
1846 pub fn get_funding_redeemscript(&self) -> ScriptBuf {
1847 make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
1850 fn counterparty_funding_pubkey(&self) -> &PublicKey {
1851 &self.get_counterparty_pubkeys().funding_pubkey
	pub fn get_feerate_sat_per_1000_weight(&self) -> u32 {
		self.feerate_per_kw
	}
1858 pub fn get_dust_buffer_feerate(&self, outbound_feerate_update: Option<u32>) -> u32 {
1859 // When calculating our exposure to dust HTLCs, we assume that the channel feerate
		// may, at any point, increase by at least 10 sat/vB (i.e. 2530 sat/kWU) or 25%,
1861 // whichever is higher. This ensures that we aren't suddenly exposed to significantly
1862 // more dust balance if the feerate increases when we have several HTLCs pending
1863 // which are near the dust limit.
1864 let mut feerate_per_kw = self.feerate_per_kw;
1865 // If there's a pending update fee, use it to ensure we aren't under-estimating
1866 // potential feerate updates coming soon.
1867 if let Some((feerate, _)) = self.pending_update_fee {
1868 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1870 if let Some(feerate) = outbound_feerate_update {
1871 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1873 let feerate_plus_quarter = feerate_per_kw.checked_mul(1250).map(|v| v / 1000);
1874 cmp::max(2530, feerate_plus_quarter.unwrap_or(u32::max_value()))
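		// For example, a current feerate of 1_000 sat/kW is buffered up to the 2_530
		// floor, while 4_000 sat/kW becomes 4_000 * 1250 / 1000 = 5_000 sat/kW.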
1877 /// Get forwarding information for the counterparty.
1878 pub fn counterparty_forwarding_info(&self) -> Option<CounterpartyForwardingInfo> {
1879 self.counterparty_forwarding_info.clone()
	/// Returns an HTLCStats describing the pending inbound HTLCs.
1883 fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1885 let mut stats = HTLCStats {
1886 pending_htlcs: context.pending_inbound_htlcs.len() as u32,
1887 pending_htlcs_value_msat: 0,
1888 on_counterparty_tx_dust_exposure_msat: 0,
1889 on_holder_tx_dust_exposure_msat: 0,
1890 holding_cell_msat: 0,
1891 on_holder_tx_holding_cell_htlcs_count: 0,
		let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
			(0, 0)
		} else {
1897 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1898 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1899 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1901 let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
1902 let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
1903 for ref htlc in context.pending_inbound_htlcs.iter() {
1904 stats.pending_htlcs_value_msat += htlc.amount_msat;
1905 if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
1906 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1908 if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
1909 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
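		// As an illustration (hypothetical values): with a dust buffer feerate of
		// 2_530 sat/kW and an HTLC-success weight of 703 WU, `htlc_success_dust_limit`
		// is 2_530 * 703 / 1000 = 1_778 sat, so with a 354 sat holder dust limit any
		// inbound HTLC under 2_132 sat counts towards our on-holder-tx dust exposure.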
	/// Returns an HTLCStats describing the pending outbound HTLCs, *including* pending adds in our holding cell.
1916 fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1918 let mut stats = HTLCStats {
1919 pending_htlcs: context.pending_outbound_htlcs.len() as u32,
1920 pending_htlcs_value_msat: 0,
1921 on_counterparty_tx_dust_exposure_msat: 0,
1922 on_holder_tx_dust_exposure_msat: 0,
1923 holding_cell_msat: 0,
1924 on_holder_tx_holding_cell_htlcs_count: 0,
		let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
			(0, 0)
		} else {
1930 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1931 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1932 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1934 let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
1935 let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
1936 for ref htlc in context.pending_outbound_htlcs.iter() {
1937 stats.pending_htlcs_value_msat += htlc.amount_msat;
1938 if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
1939 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1941 if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
1942 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1946 for update in context.holding_cell_htlc_updates.iter() {
1947 if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
1948 stats.pending_htlcs += 1;
1949 stats.pending_htlcs_value_msat += amount_msat;
1950 stats.holding_cell_msat += amount_msat;
1951 if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
1952 stats.on_counterparty_tx_dust_exposure_msat += amount_msat;
1954 if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
1955 stats.on_holder_tx_dust_exposure_msat += amount_msat;
1957 stats.on_holder_tx_holding_cell_htlcs_count += 1;
1964 /// Get the available balances, see [`AvailableBalances`]'s fields for more info.
1965 /// Doesn't bother handling the
1966 /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
1967 /// corner case properly.
1968 pub fn get_available_balances<F: Deref>(&self, fee_estimator: &LowerBoundedFeeEstimator<F>)
1969 -> AvailableBalances
1970 where F::Target: FeeEstimator
1972 let context = &self;
1973 // Note that we have to handle overflow due to the above case.
1974 let inbound_stats = context.get_inbound_pending_htlc_stats(None);
1975 let outbound_stats = context.get_outbound_pending_htlc_stats(None);
1977 let mut balance_msat = context.value_to_self_msat;
1978 for ref htlc in context.pending_inbound_htlcs.iter() {
1979 if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
1980 balance_msat += htlc.amount_msat;
1983 balance_msat -= outbound_stats.pending_htlcs_value_msat;
1985 let outbound_capacity_msat = context.value_to_self_msat
1986 .saturating_sub(outbound_stats.pending_htlcs_value_msat)
			.saturating_sub(context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);
1990 let mut available_capacity_msat = outbound_capacity_msat;
1992 let anchor_outputs_value_msat = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1993 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
1997 if context.is_outbound() {
1998 // We should mind channel commit tx fee when computing how much of the available capacity
1999 // can be used in the next htlc. Mirrors the logic in send_htlc.
2001 // The fee depends on whether the amount we will be sending is above dust or not,
			// and the answer will in turn change the amount itself, making it a circular
			// dependency.
			// This complicates the computation around dust-values, up to the one-htlc-value.
2005 let mut real_dust_limit_timeout_sat = context.holder_dust_limit_satoshis;
2006 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2007 real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000;
2010 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
2011 let mut max_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
2012 let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered);
2013 let mut min_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_dust, Some(()));
2014 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2015 max_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
2016 min_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
2019 // We will first subtract the fee as if we were above-dust. Then, if the resulting
2020 // value ends up being below dust, we have this fee available again. In that case,
2021 // match the value to right-below-dust.
2022 let mut capacity_minus_commitment_fee_msat: i64 = available_capacity_msat as i64 -
2023 max_reserved_commit_tx_fee_msat as i64 - anchor_outputs_value_msat as i64;
2024 if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 {
2025 let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
2026 debug_assert!(one_htlc_difference_msat != 0);
2027 capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64;
2028 capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat);
2029 available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64;
2031 available_capacity_msat = capacity_minus_commitment_fee_msat as u64;
2034 // If the channel is inbound (i.e. counterparty pays the fee), we need to make sure
2035 // sending a new HTLC won't reduce their balance below our reserve threshold.
2036 let mut real_dust_limit_success_sat = context.counterparty_dust_limit_satoshis;
2037 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2038 real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000;
2041 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered);
2042 let max_reserved_commit_tx_fee_msat = context.next_remote_commit_tx_fee_msat(htlc_above_dust, None);
2044 let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000;
2045 let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat)
2046 .saturating_sub(inbound_stats.pending_htlcs_value_msat);
2048 if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat + anchor_outputs_value_msat {
2049 // If another HTLC's fee would reduce the remote's balance below the reserve limit
2050 // we've selected for them, we can only send dust HTLCs.
2051 available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1);
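				// E.g. (illustrative): if their balance net of pending inbound HTLCs is
				// 11_000_000 msat, our selected reserve is 10_000_000 msat and the
				// reserved commitment fee is ~2_240_000 msat, the sum exceeds their
				// balance, so we clamp to dust-only sends above.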
2055 let mut next_outbound_htlc_minimum_msat = context.counterparty_htlc_minimum_msat;
2057 // If we get close to our maximum dust exposure, we end up in a situation where we can send
		// between zero and the remaining dust exposure limit OR above the dust limit.
2059 // Because we cannot express this as a simple min/max, we prefer to tell the user they can
2060 // send above the dust limit (as the router can always overpay to meet the dust limit).
2061 let mut remaining_msat_below_dust_exposure_limit = None;
2062 let mut dust_exposure_dust_limit_msat = 0;
2063 let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(fee_estimator);
2065 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2066 (context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
2068 let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64;
2069 (context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2070 context.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2072 let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
2073 if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
2074 remaining_msat_below_dust_exposure_limit =
2075 Some(max_dust_htlc_exposure_msat.saturating_sub(on_counterparty_dust_htlc_exposure_msat));
2076 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
2079 let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
2080 if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
2081 remaining_msat_below_dust_exposure_limit = Some(cmp::min(
2082 remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
2083 max_dust_htlc_exposure_msat.saturating_sub(on_holder_dust_htlc_exposure_msat)));
2084 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
2087 if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit {
2088 if available_capacity_msat < dust_exposure_dust_limit_msat {
2089 available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat);
2091 next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat);
2095 available_capacity_msat = cmp::min(available_capacity_msat,
2096 context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat);
2098 if outbound_stats.pending_htlcs + 1 > context.counterparty_max_accepted_htlcs as u32 {
2099 available_capacity_msat = 0;
2103 inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000
2104 - context.value_to_self_msat as i64
2105 - context.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
					- context.holder_selected_channel_reserve_satoshis as i64 * 1000,
				0) as u64,
2108 outbound_capacity_msat,
2109 next_outbound_htlc_limit_msat: available_capacity_msat,
2110 next_outbound_htlc_minimum_msat,
2115 pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
2116 let context = &self;
2117 (context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis)
2120 /// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the
2121 /// number of pending HTLCs that are on track to be in our next commitment tx.
2123 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
2124 /// `fee_spike_buffer_htlc` is `Some`.
2126 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
2127 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
2129 /// Dust HTLCs are excluded.
2130 fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
2131 let context = &self;
2132 assert!(context.is_outbound());
		let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
			(0, 0)
		} else {
2137 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2138 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2140 let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
2141 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
2143 let mut addl_htlcs = 0;
2144 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
		match htlc.origin {
			HTLCInitiator::LocalOffered => {
				if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat { addl_htlcs += 1; }
			},
			HTLCInitiator::RemoteOffered => {
				if htlc.amount_msat / 1000 >= real_dust_limit_success_sat { addl_htlcs += 1; }
			}
		}
2158 let mut included_htlcs = 0;
2159 for ref htlc in context.pending_inbound_htlcs.iter() {
			if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
				continue
			}
2163 // We include LocalRemoved HTLCs here because we may still need to broadcast a commitment
2164 // transaction including this HTLC if it times out before they RAA.
2165 included_htlcs += 1;
2168 for ref htlc in context.pending_outbound_htlcs.iter() {
			if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
				continue
			}
			match htlc.state {
2173 OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1,
2174 OutboundHTLCState::Committed => included_htlcs += 1,
2175 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
2176 // We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment
2177 // transaction won't be generated until they send us their next RAA, which will mean
2178 // dropping any HTLCs in this state.
2183 for htlc in context.holding_cell_htlc_updates.iter() {
2185 &HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => {
2186 if amount_msat / 1000 < real_dust_limit_timeout_sat {
2191 _ => {}, // Don't include claims/fails that are awaiting ack, because once we get the
2192 // ack we're guaranteed to never include them in commitment txs anymore.
2196 let num_htlcs = included_htlcs + addl_htlcs;
2197 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
2198 #[cfg(any(test, fuzzing))]
2201 if fee_spike_buffer_htlc.is_some() {
2202 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
2204 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len()
2205 + context.holding_cell_htlc_updates.len();
2206 let commitment_tx_info = CommitmentTxInfoCached {
2208 total_pending_htlcs,
2209 next_holder_htlc_id: match htlc.origin {
2210 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
2211 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
2213 next_counterparty_htlc_id: match htlc.origin {
2214 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
2215 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
2217 feerate: context.feerate_per_kw,
2219 *context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
2224 /// Get the commitment tx fee for the remote's next commitment transaction based on the number of
2225 /// pending HTLCs that are on track to be in their next commitment tx
2227 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
2228 /// `fee_spike_buffer_htlc` is `Some`.
2230 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
2231 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
2233 /// Dust HTLCs are excluded.
2234 fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
2235 let context = &self;
2236 assert!(!context.is_outbound());
		let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
			(0, 0)
		} else {
2241 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2242 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2244 let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
2245 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
2247 let mut addl_htlcs = 0;
2248 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
		match htlc.origin {
			HTLCInitiator::LocalOffered => {
				if htlc.amount_msat / 1000 >= real_dust_limit_success_sat { addl_htlcs += 1; }
			},
			HTLCInitiator::RemoteOffered => {
				if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat { addl_htlcs += 1; }
			}
		}
2262 // When calculating the set of HTLCs which will be included in their next commitment_signed, all
		// non-dust inbound HTLCs are included (as all states imply they will be included) and only
2264 // committed outbound HTLCs, see below.
2265 let mut included_htlcs = 0;
2266 for ref htlc in context.pending_inbound_htlcs.iter() {
			if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat {
				continue
			}
2270 included_htlcs += 1;
2273 for ref htlc in context.pending_outbound_htlcs.iter() {
			if htlc.amount_msat / 1000 <= real_dust_limit_success_sat {
				continue
			}
2277 // We only include outbound HTLCs if it will not be included in their next commitment_signed,
			// i.e. if they've responded to us with an RAA after announcement.
			match htlc.state {
2280 OutboundHTLCState::Committed => included_htlcs += 1,
2281 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
2282 OutboundHTLCState::LocalAnnounced { .. } => included_htlcs += 1,
2287 let num_htlcs = included_htlcs + addl_htlcs;
2288 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
2289 #[cfg(any(test, fuzzing))]
2292 if fee_spike_buffer_htlc.is_some() {
2293 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
2295 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len();
2296 let commitment_tx_info = CommitmentTxInfoCached {
2298 total_pending_htlcs,
2299 next_holder_htlc_id: match htlc.origin {
2300 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
2301 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
2303 next_counterparty_htlc_id: match htlc.origin {
2304 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
2305 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
2307 feerate: context.feerate_per_kw,
2309 *context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
2314 fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O>
2315 where F: Fn() -> Option<O> {
2316 match self.channel_state {
2317 ChannelState::FundingNegotiated => f(),
			ChannelState::AwaitingChannelReady(flags) =>
				if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) { f() } else { None },
			_ => None,
		}
	}
	/// Returns the transaction if there is a pending funding transaction that is yet to be
	/// broadcast.
2329 pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
2330 self.if_unbroadcasted_funding(|| self.funding_transaction.clone())
	/// Returns the transaction ID if there is a pending funding transaction that is yet to be
	/// broadcast.
2335 pub fn unbroadcasted_funding_txid(&self) -> Option<Txid> {
2336 self.if_unbroadcasted_funding(||
2337 self.channel_transaction_parameters.funding_outpoint.map(|txo| txo.txid)
2341 /// Returns whether the channel is funded in a batch.
2342 pub fn is_batch_funding(&self) -> bool {
2343 self.is_batch_funding.is_some()
	/// Returns the transaction ID if there is a pending batch funding transaction that is yet to be
	/// broadcast.
2348 pub fn unbroadcasted_batch_funding_txid(&self) -> Option<Txid> {
2349 self.unbroadcasted_funding_txid().filter(|_| self.is_batch_funding())
2352 /// Gets the latest commitment transaction and any dependent transactions for relay (forcing
2353 /// shutdown of this channel - no more calls into this Channel may be made afterwards except
2354 /// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
2355 /// Also returns the list of payment_hashes for channels which we can safely fail backwards
2356 /// immediately (others we will have to allow to time out).
2357 pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult {
2358 // Note that we MUST only generate a monitor update that indicates force-closure - we're
2359 // called during initialization prior to the chain_monitor in the encompassing ChannelManager
		// being fully configured in some cases. Thus, it's likely any monitor events we generate will
2361 // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
2362 assert!(!matches!(self.channel_state, ChannelState::ShutdownComplete));
2364 // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
2365 // return them to fail the payment.
2366 let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
2367 let counterparty_node_id = self.get_counterparty_node_id();
2368 for htlc_update in self.holding_cell_htlc_updates.drain(..) {
			match htlc_update {
				HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
2371 dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
2376 let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
2377 // If we haven't yet exchanged funding signatures (ie channel_state < AwaitingChannelReady),
2378 // returning a channel monitor update here would imply a channel monitor update before
2379 // we even registered the channel monitor to begin with, which is invalid.
2380 // Thus, if we aren't actually at a point where we could conceivably broadcast the
2381 // funding transaction, don't return a funding txo (which prevents providing the
2382 // monitor update to the user, even if we return one).
2383 // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
2384 let generate_monitor_update = match self.channel_state {
				ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)|ChannelState::ShutdownComplete => true,
				_ => false,
			};
2388 if generate_monitor_update {
2389 self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
2390 Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
2391 update_id: self.latest_monitor_update_id,
2392 counterparty_node_id: Some(self.counterparty_node_id),
2393 updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
2397 let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();
2399 self.channel_state = ChannelState::ShutdownComplete;
2400 self.update_time_counter += 1;
		ShutdownResult {
			monitor_update,
			dropped_outbound_htlcs,
2404 unbroadcasted_batch_funding_txid,
2405 channel_id: self.channel_id,
2406 counterparty_node_id: self.counterparty_node_id,
2410 /// Only allowed after [`Self::channel_transaction_parameters`] is set.
2411 fn get_funding_signed_msg<L: Deref>(&mut self, logger: &L) -> (CommitmentTransaction, Option<msgs::FundingSigned>) where L::Target: Logger {
2412 let counterparty_keys = self.build_remote_transaction_keys();
2413 let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number + 1, &counterparty_keys, false, false, logger).tx;
2415 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
2416 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
2417 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
2418 &self.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
2420 match &self.holder_signer {
2421 // TODO (arik): move match into calling method for Taproot
2422 ChannelSignerType::Ecdsa(ecdsa) => {
2423 let funding_signed = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.secp_ctx)
2424 .map(|(signature, _)| msgs::FundingSigned {
						channel_id: self.channel_id(),
						signature,
						#[cfg(taproot)]
						partial_signature_with_nonce: None,
					}).ok();
2432 if funding_signed.is_none() {
2433 #[cfg(not(async_signing))] {
2434 panic!("Failed to get signature for funding_signed");
2436 #[cfg(async_signing)] {
2437 log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
2438 self.signer_pending_funding = true;
2440 } else if self.signer_pending_funding {
2441 log_trace!(logger, "Counterparty commitment signature available for funding_signed message; clearing signer_pending_funding");
2442 self.signer_pending_funding = false;
2445 // We sign "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
2446 (counterparty_initial_commitment_tx, funding_signed)
2448 // TODO (taproot|arik)
2455 // Internal utility functions for channels
2457 /// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
2458 /// `channel_value_satoshis` in msat, set through
2459 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
2461 /// The effective percentage is lower bounded by 1% and upper bounded by 100%.
2463 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
2464 fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
	let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
		1
	} else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
		100
	} else {
		config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
	};
	channel_value_satoshis * 10 * configured_percent
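	// E.g. a 1_000_000 sat channel with the percentage configured to 10 yields
	// 1_000_000 * 10 * 10 = 100_000_000 msat, i.e. 10% of the channel value in msat.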
2475 /// Returns a minimum channel reserve value the remote needs to maintain,
2476 /// required by us according to the configured or default
2477 /// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
2479 /// Guaranteed to return a value no larger than channel_value_satoshis
2481 /// This is used both for outbound and inbound channels and has lower bound
2482 /// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
2483 pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
2484 let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
2485 cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
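	// For example (illustrative): a 1_000_000 sat channel with
	// `their_channel_reserve_proportional_millionths` of 10_000 (1%) yields a
	// 10_000 sat reserve, while a 50_000 sat channel's computed 500 sat is raised
	// to the `MIN_THEIR_CHAN_RESERVE_SATOSHIS` floor.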
2488 /// This is for legacy reasons, present for forward-compatibility.
/// LDK versions older than 0.0.104 don't know how to read/handle values other than default
2490 /// from storage. Hence, we use this function to not persist default values of
2491 /// `holder_selected_channel_reserve_satoshis` for channels into storage.
2492 pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
2493 let (q, _) = channel_value_satoshis.overflowing_div(100);
2494 cmp::min(channel_value_satoshis, cmp::max(q, 1000))
2497 // Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
2498 // Note that num_htlcs should not include dust HTLCs.
2500 fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2501 feerate_per_kw as u64 * (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
2504 // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs.
2505 // Note that num_htlcs should not include dust HTLCs.
2506 pub(crate) fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2507 // Note that we need to divide before multiplying to round properly,
2508 // since the lowest denomination of bitcoin on-chain is the satoshi.
2509 (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
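	// E.g. (non-anchor figures, illustrative): one non-dust HTLC at 253 sat/kW gives
	// (724 + 172) * 253 / 1000 * 1000 = 226_000 msat; dividing by 1000 before
	// re-multiplying rounds down to a whole satoshi, whereas multiplying first would
	// leave the non-round 226_688 msat.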
2512 // Holder designates channel data owned for the benefit of the user client.
// Counterparty designates channel data owned by the other channel participant.
2514 pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
2515 pub context: ChannelContext<SP>,
2518 #[cfg(any(test, fuzzing))]
2519 struct CommitmentTxInfoCached {
2521 total_pending_htlcs: usize,
2522 next_holder_htlc_id: u64,
2523 next_counterparty_htlc_id: u64,
2527 /// Contents of a wire message that fails an HTLC backwards. Useful for [`Channel::fail_htlc`] to
2528 /// fail with either [`msgs::UpdateFailMalformedHTLC`] or [`msgs::UpdateFailHTLC`] as needed.
2529 trait FailHTLCContents {
2530 type Message: FailHTLCMessageName;
2531 fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message;
2532 fn to_inbound_htlc_state(self) -> InboundHTLCState;
2533 fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK;
2535 impl FailHTLCContents for msgs::OnionErrorPacket {
2536 type Message = msgs::UpdateFailHTLC;
2537 fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
2538 msgs::UpdateFailHTLC { htlc_id, channel_id, reason: self }
2540 fn to_inbound_htlc_state(self) -> InboundHTLCState {
2541 InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(self))
2543 fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
2544 HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet: self }
2547 impl FailHTLCContents for (u16, [u8; 32]) {
2548 type Message = msgs::UpdateFailMalformedHTLC; // (failure_code, sha256_of_onion)
2549 fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
2550 msgs::UpdateFailMalformedHTLC {
2551 channel_id,
2552 htlc_id,
2553 failure_code: self.0,
2554 sha256_of_onion: self.1
2557 fn to_inbound_htlc_state(self) -> InboundHTLCState {
2558 InboundHTLCState::LocalRemoved(
2559 InboundHTLCRemovalReason::FailMalformed((self.1, self.0))
2562 fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
2563 HTLCUpdateAwaitingACK::FailMalformedHTLC {
2564 htlc_id,
2565 failure_code: self.0,
2566 sha256_of_onion: self.1
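// Note: this tuple impl is what lets `queue_fail_malformed_htlc` below reuse the
// generic `fail_htlc` path, emitting an `update_fail_malformed_htlc` from a bare
// `(failure_code, sha256_of_onion)` pair rather than a full onion error packet.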
2571 trait FailHTLCMessageName {
2572 fn name() -> &'static str;
2574 impl FailHTLCMessageName for msgs::UpdateFailHTLC {
2575 fn name() -> &'static str {
2576 "update_fail_htlc"
2579 impl FailHTLCMessageName for msgs::UpdateFailMalformedHTLC {
2580 fn name() -> &'static str {
2581 "update_fail_malformed_htlc"
2585 impl<SP: Deref> Channel<SP> where
2586 SP::Target: SignerProvider,
2587 <SP::Target as SignerProvider>::EcdsaSigner: WriteableEcdsaChannelSigner
2589 fn check_remote_fee<F: Deref, L: Deref>(
2590 channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator<F>,
2591 feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L
2592 ) -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
2594 let lower_limit_conf_target = if channel_type.supports_anchors_zero_fee_htlc_tx() {
2595 ConfirmationTarget::MinAllowedAnchorChannelRemoteFee
2597 ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee
2599 let lower_limit = fee_estimator.bounded_sat_per_1000_weight(lower_limit_conf_target);
2600 if feerate_per_kw < lower_limit {
2601 if let Some(cur_feerate) = cur_feerate_per_kw {
2602 if feerate_per_kw > cur_feerate {
2603 log_warn!(logger,
2604 "Accepting feerate that may prevent us from closing this channel because it's higher than what we have now. Had {} s/kW, now {} s/kW.",
2605 cur_feerate, feerate_per_kw);
2606 return Ok(());
2609 return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit)));
2615 fn get_closing_scriptpubkey(&self) -> ScriptBuf {
2616 // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
2617 // is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
2618 // outside of those situations will fail.
2619 self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
2623 fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
2624 let mut ret =
2625 (4 + // version
2626 1 + // input count
2627 36 + // prevout
2628 1 + // script length (0)
2629 4 + // sequence
2630 1 + // output count
2631 4 // lock time
2632 )*4 + // * 4 for non-witness parts
2633 2 + // witness marker and flag
2634 1 + // witness element count
2635 4 + // 4 element lengths (2 sigs, multisig dummy, and witness script)
2636 self.context.get_funding_redeemscript().len() as u64 + // funding witness script
2637 2*(1 + 71); // two signatures + sighash type flags
2638 if let Some(spk) = a_scriptpubkey {
2639 ret += ((8+1) + // output values and script length
2640 spk.len() as u64) * 4; // scriptpubkey and witness multiplier
2641 }
2642 if let Some(spk) = b_scriptpubkey {
2643 ret += ((8+1) + // output values and script length
2644 spk.len() as u64) * 4; // scriptpubkey and witness multiplier
2645 }
2646 ret
2647 }
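// Rough worked example (illustrative): with the usual 71-byte 2-of-2 funding
// redeemscript and both sides closing to P2WPKH (22-byte scriptpubkeys):
//   fixed part: (4+1+36+1+4+1+4)*4 + 2 + 1 + 4 + 71 + 2*(1+71) = 426 WU,
//   each output: (8+1+22)*4 = 124 WU,
// giving 426 + 124 + 124 = 674 WU for a two-output closing transaction.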
2650 fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
2651 assert!(self.context.pending_inbound_htlcs.is_empty());
2652 assert!(self.context.pending_outbound_htlcs.is_empty());
2653 assert!(self.context.pending_update_fee.is_none());
2655 let mut total_fee_satoshis = proposed_total_fee_satoshis;
2656 let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 };
2657 let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 };
2659 if value_to_holder < 0 {
2660 assert!(self.context.is_outbound());
2661 total_fee_satoshis += (-value_to_holder) as u64;
2662 } else if value_to_counterparty < 0 {
2663 assert!(!self.context.is_outbound());
2664 total_fee_satoshis += (-value_to_counterparty) as u64;
2667 if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis {
2668 value_to_counterparty = 0;
2671 if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis {
2672 value_to_holder = 0;
2675 assert!(self.context.shutdown_scriptpubkey.is_some());
2676 let holder_shutdown_script = self.get_closing_scriptpubkey();
2677 let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap();
2678 let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();
2680 let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
2681 (closing_transaction, total_fee_satoshis)
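// E.g. (illustrative): if the holder funded a 100_000 sat channel, holds
// 60_000 sats, and proposes a 1_000 sat fee, the closing outputs are 59_000
// sats to the holder and 40_000 sats to the counterparty, with either output
// dropped entirely if it would fall at or below the holder dust limit.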
2684 fn funding_outpoint(&self) -> OutPoint {
2685 self.context.channel_transaction_parameters.funding_outpoint.unwrap()
2688 /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
2689 /// entirely.
2691 /// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
2692 /// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
2694 /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is
2695 /// disconnected).
2697 (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
2698 where L::Target: Logger {
2699 // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
2700 // (see equivalent if condition there).
2701 assert!(self.context.channel_state.should_force_holding_cell());
2702 let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
2703 let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
2704 self.context.latest_monitor_update_id = mon_update_id;
2705 if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
2706 assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
2710 fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
2711 // Either ChannelReady got set (which means it won't be unset) or there is no way any
2712 // caller thought we could have something claimed (because we wouldn't have accepted an
2713 // incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
2714 // so it is fine to panic if we're not in an operational state here.
2715 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
2716 panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
2719 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
2720 // on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
2721 // these, but for now we just have to treat them as normal.
2723 let mut pending_idx = core::usize::MAX;
2724 let mut htlc_value_msat = 0;
2725 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
2726 if htlc.htlc_id == htlc_id_arg {
2727 debug_assert_eq!(htlc.payment_hash, PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).to_byte_array()));
2728 log_debug!(logger, "Claiming inbound HTLC id {} with payment hash {} with preimage {}",
2729 htlc.htlc_id, htlc.payment_hash, payment_preimage_arg);
2730 match htlc.state {
2731 InboundHTLCState::Committed => {},
2732 InboundHTLCState::LocalRemoved(ref reason) => {
2733 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
2734 } else {
2735 log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, &self.context.channel_id());
2736 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
2738 return UpdateFulfillFetch::DuplicateClaim {};
2739 },
2740 _ => {
2741 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2742 // Don't return in release mode here so that we can update channel_monitor
2743 }
2744 }
2745 pending_idx = idx;
2746 htlc_value_msat = htlc.amount_msat;
2747 break;
2750 if pending_idx == core::usize::MAX {
2751 #[cfg(any(test, fuzzing))]
2752 // If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
2753 // this is simply a duplicate claim, not previously failed and we lost funds.
2754 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2755 return UpdateFulfillFetch::DuplicateClaim {};
2758 // Now update local state:
2760 // We have to put the payment_preimage in the channel_monitor right away here to ensure we
2761 // can claim it even if the channel hits the chain before we see their next commitment.
2762 self.context.latest_monitor_update_id += 1;
2763 let monitor_update = ChannelMonitorUpdate {
2764 update_id: self.context.latest_monitor_update_id,
2765 counterparty_node_id: Some(self.context.counterparty_node_id),
2766 updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
2767 payment_preimage: payment_preimage_arg.clone(),
2771 if self.context.channel_state.should_force_holding_cell() {
2772 // Note that this condition is the same as the assertion in
2773 // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
2774 // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
2775 // do not get into this branch.
2776 for pending_update in self.context.holding_cell_htlc_updates.iter() {
2777 match pending_update {
2778 &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2779 if htlc_id_arg == htlc_id {
2780 // Make sure we don't leave latest_monitor_update_id incremented here:
2781 self.context.latest_monitor_update_id -= 1;
2782 #[cfg(any(test, fuzzing))]
2783 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2784 return UpdateFulfillFetch::DuplicateClaim {};
2787 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
2788 &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
2790 if htlc_id_arg == htlc_id {
2791 log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id());
2792 // TODO: We may actually be able to switch to a fulfill here, though it's
2793 // rare enough it may not be worth the complexity burden.
2794 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
2795 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2801 log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state.to_u32());
2802 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
2803 payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
2805 #[cfg(any(test, fuzzing))]
2806 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
2807 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2809 #[cfg(any(test, fuzzing))]
2810 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
2813 let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
2814 if let InboundHTLCState::Committed = htlc.state {
2815 } else {
2816 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2817 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2819 log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, &self.context.channel_id);
2820 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
2823 UpdateFulfillFetch::NewClaim {
2824 monitor_update,
2825 htlc_value_msat,
2826 msg: Some(msgs::UpdateFulfillHTLC {
2827 channel_id: self.context.channel_id(),
2828 htlc_id: htlc_id_arg,
2829 payment_preimage: payment_preimage_arg,
2834 pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
2835 let release_cs_monitor = self.context.blocked_monitor_updates.is_empty();
2836 match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
2837 UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
2838 // Even if we aren't supposed to let new monitor updates with commitment state
2839 // updates run, we still need to push the preimage ChannelMonitorUpdateStep no
2840 // matter what. Sadly, to push a new monitor update which flies before others
2841 // already queued, we have to insert it into the pending queue and update the
2842 // update_ids of all the following monitors.
2843 if release_cs_monitor && msg.is_some() {
2844 let mut additional_update = self.build_commitment_no_status_check(logger);
2845 // build_commitment_no_status_check may bump latest_monitor_id but we want them
2846 // to be strictly increasing by one, so decrement it here.
2847 self.context.latest_monitor_update_id = monitor_update.update_id;
2848 monitor_update.updates.append(&mut additional_update.updates);
2849 } else {
2850 let new_mon_id = self.context.blocked_monitor_updates.get(0)
2851 .map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
2852 monitor_update.update_id = new_mon_id;
2853 for held_update in self.context.blocked_monitor_updates.iter_mut() {
2854 held_update.update.update_id += 1;
2855 }
2856 if msg.is_some() {
2857 debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
2858 let update = self.build_commitment_no_status_check(logger);
2859 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
2860 update,
2861 });
2862 }
2863 }
2865 self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
2866 UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, }
2868 UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
2872 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2873 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2874 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2875 /// before we fail backwards.
2877 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2878 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2879 /// [`ChannelError::Ignore`].
2880 pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
2881 -> Result<(), ChannelError> where L::Target: Logger {
2882 self.fail_htlc(htlc_id_arg, err_packet, true, logger)
2883 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
2886 /// Used for failing back with [`msgs::UpdateFailMalformedHTLC`]. For now, this is used when we
2887 /// want to fail blinded HTLCs where we are not the intro node.
2889 /// See [`Self::queue_fail_htlc`] for more info.
2890 pub fn queue_fail_malformed_htlc<L: Deref>(
2891 &mut self, htlc_id_arg: u64, failure_code: u16, sha256_of_onion: [u8; 32], logger: &L
2892 ) -> Result<(), ChannelError> where L::Target: Logger {
2893 self.fail_htlc(htlc_id_arg, (failure_code, sha256_of_onion), true, logger)
2894 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
2897 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2898 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2899 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2900 /// before we fail backwards.
2902 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2903 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2904 /// [`ChannelError::Ignore`].
2905 fn fail_htlc<L: Deref, E: FailHTLCContents + Clone>(
2906 &mut self, htlc_id_arg: u64, err_packet: E, mut force_holding_cell: bool,
2907 logger: &L
2908 ) -> Result<Option<E::Message>, ChannelError> where L::Target: Logger {
2909 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
2910 panic!("Was asked to fail an HTLC when channel was not in an operational state");
2913 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
2914 // on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
2915 // these, but for now we just have to treat them as normal.
2917 let mut pending_idx = core::usize::MAX;
2918 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
2919 if htlc.htlc_id == htlc_id_arg {
2920 match htlc.state {
2921 InboundHTLCState::Committed => {},
2922 InboundHTLCState::LocalRemoved(ref reason) => {
2923 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
2924 } else {
2925 debug_assert!(false, "Tried to fail an HTLC that was already failed");
2926 }
2927 return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
2928 },
2929 _ => {
2930 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2931 return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
2932 }
2933 }
2934 pending_idx = idx;
2935 }
2936 }
2937 if pending_idx == core::usize::MAX {
2938 #[cfg(any(test, fuzzing))]
2939 // If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
2940 // is simply a duplicate fail, not previously failed and we failed-back too early.
2941 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2942 return Ok(None);
2945 if self.context.channel_state.should_force_holding_cell() {
2946 debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
2947 force_holding_cell = true;
2950 // Now update local state:
2951 if force_holding_cell {
2952 for pending_update in self.context.holding_cell_htlc_updates.iter() {
2953 match pending_update {
2954 &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2955 if htlc_id_arg == htlc_id {
2956 #[cfg(any(test, fuzzing))]
2957 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2958 return Ok(None);
2961 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
2962 &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
2964 if htlc_id_arg == htlc_id {
2965 debug_assert!(false, "Tried to fail an HTLC that was already failed");
2966 return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
2972 log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id());
2973 self.context.holding_cell_htlc_updates.push(err_packet.to_htlc_update_awaiting_ack(htlc_id_arg));
2974 return Ok(None);
2975 }
2977 log_trace!(logger, "Failing HTLC ID {} back with {} message in channel {}.", htlc_id_arg,
2978 E::Message::name(), &self.context.channel_id());
2980 let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
2981 htlc.state = err_packet.clone().to_inbound_htlc_state();
2984 Ok(Some(err_packet.to_message(htlc_id_arg, self.context.channel_id())))
2987 // Message handlers:
2988 /// Updates the state of the channel to indicate that all channels in the batch have received
2989 /// funding_signed and persisted their monitors.
2990 /// The funding transaction is consequently allowed to be broadcast, and the channel can be
2991 /// treated as a non-batch channel going forward.
2992 pub fn set_batch_ready(&mut self) {
2993 self.context.is_batch_funding = None;
2994 self.context.channel_state.clear_waiting_for_batch();
2997 /// Unsets the existing funding information.
2999 /// This must only be used if the channel has not yet completed funding and has not been used.
3001 /// Further, the channel must be immediately shut down after this with a call to
3002 /// [`ChannelContext::force_shutdown`].
3003 pub fn unset_funding_info(&mut self, temporary_channel_id: ChannelId) {
3004 debug_assert!(matches!(
3005 self.context.channel_state, ChannelState::AwaitingChannelReady(_)
3007 self.context.channel_transaction_parameters.funding_outpoint = None;
3008 self.context.channel_id = temporary_channel_id;
3011 /// Handles a channel_ready message from our peer. If we've already sent our channel_ready
3012 /// and the channel is now usable (and public), this may generate an announcement_signatures
3013 /// message to reply with.
3014 pub fn channel_ready<NS: Deref, L: Deref>(
3015 &mut self, msg: &msgs::ChannelReady, node_signer: &NS, chain_hash: ChainHash,
3016 user_config: &UserConfig, best_block: &BestBlock, logger: &L
3017 ) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
3018 where
3019 NS::Target: NodeSigner,
3020 L::Target: Logger
3021 {
3022 if self.context.channel_state.is_peer_disconnected() {
3023 self.context.workaround_lnd_bug_4006 = Some(msg.clone());
3024 return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
3027 if let Some(scid_alias) = msg.short_channel_id_alias {
3028 if Some(scid_alias) != self.context.short_channel_id {
3029 // The scid alias provided can be used to route payments *from* our counterparty,
3030 // i.e. can be used for inbound payments and provided in invoices, but is not used
3031 // when routing outbound payments.
3032 self.context.latest_inbound_scid_alias = Some(scid_alias);
3036 // Our channel_ready shouldn't have been sent if we are waiting for other channels in the
3037 // batch, but we can receive channel_ready messages.
3038 let mut check_reconnection = false;
3039 match &self.context.channel_state {
3040 ChannelState::AwaitingChannelReady(flags) => {
3041 let flags = *flags & !FundedStateFlags::ALL;
3042 debug_assert!(!flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY) || !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
3043 if flags & !AwaitingChannelReadyFlags::WAITING_FOR_BATCH == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY {
3044 // If we reconnected before sending our `channel_ready` they may still resend theirs.
3045 check_reconnection = true;
3046 } else if (flags & !AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty() {
3047 self.context.channel_state.set_their_channel_ready();
3048 } else if flags == AwaitingChannelReadyFlags::OUR_CHANNEL_READY {
3049 self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
3050 self.context.update_time_counter += 1;
3051 } else {
3052 // We're in `WAITING_FOR_BATCH`, so we should wait until we're ready.
3053 debug_assert!(flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
3056 // If we reconnected before sending our `channel_ready` they may still resend theirs.
3057 ChannelState::ChannelReady(_) => check_reconnection = true,
3058 _ => return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned())),
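// In short: THEIR_CHANNEL_READY alone means this is (at best) a retransmission,
// no flags set means we record their channel_ready and keep waiting on ours,
// OUR_CHANNEL_READY alone completes the handshake and the channel becomes
// ChannelReady, and WAITING_FOR_BATCH defers all of this until the batch is ready.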
3060 if check_reconnection {
3061 // They probably disconnected/reconnected and re-sent the channel_ready, which is
3062 // required, or they're sending a fresh SCID alias.
3063 let expected_point =
3064 if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
3065 // If they haven't ever sent an updated point, the point they send should match
3066 // the current one.
3067 self.context.counterparty_cur_commitment_point
3068 } else if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
3069 // If we've advanced the commitment number once, the second commitment point is
3070 // at `counterparty_prev_commitment_point`, which is not yet revoked.
3071 debug_assert!(self.context.counterparty_prev_commitment_point.is_some());
3072 self.context.counterparty_prev_commitment_point
3073 } else {
3074 // If they have sent updated points, channel_ready is always supposed to match
3075 // their "first" point, which we re-derive here.
3076 Some(PublicKey::from_secret_key(&self.context.secp_ctx, &SecretKey::from_slice(
3077 &self.context.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
3078 ).expect("We already advanced, so previous secret keys should have been validated already")))
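// Commitment numbers count *down* from INITIAL_COMMITMENT_NUMBER (2^48 - 1):
// `- 1` here means no updated point has ever been sent, `- 2` means exactly one
// advance (so the prior point is known but not yet revoked), and anything lower
// means their first per-commitment secret has been revealed and the point can
// be re-derived from it, as above.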
3080 if expected_point != Some(msg.next_per_commitment_point) {
3081 return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
3086 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
3087 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
3089 log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id());
3091 Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger))
3094 pub fn update_add_htlc<F, FE: Deref, L: Deref>(
3095 &mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
3096 create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
3097 ) -> Result<(), ChannelError>
3098 where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
3099 FE::Target: FeeEstimator, L::Target: Logger,
3101 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3102 return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
3104 // We can't accept HTLCs sent after we've sent a shutdown.
3105 if self.context.channel_state.is_local_shutdown_sent() {
3106 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
3108 // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
3109 if self.context.channel_state.is_remote_shutdown_sent() {
3110 return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
3112 if self.context.channel_state.is_peer_disconnected() {
3113 return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
3115 if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
3116 return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
3118 if msg.amount_msat == 0 {
3119 return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
3121 if msg.amount_msat < self.context.holder_htlc_minimum_msat {
3122 return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
3125 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
3126 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
3127 if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
3128 return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
3130 if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
3131 return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
3134 // Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
3135 // the reserve_satoshis we told them to always have as direct payment so that they lose
3136 // something if we punish them for broadcasting an old state).
3137 // Note that we don't really care about having a small/no to_remote output in our local
3138 // commitment transactions, as the purpose of the channel reserve is to ensure we can
3139 // punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
3140 // present in the next commitment transaction we send them (at least for fulfilled ones,
3141 // failed ones won't modify value_to_self).
3142 // Note that we will send HTLCs which another instance of rust-lightning would think
3143 // violate the reserve value if we do not do this (as we forget inbound HTLCs from the
3144 // Channel state once they will not be present in the next received commitment
3145 // transaction).
3146 let mut removed_outbound_total_msat = 0;
3147 for ref htlc in self.context.pending_outbound_htlcs.iter() {
3148 if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
3149 removed_outbound_total_msat += htlc.amount_msat;
3150 } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
3151 removed_outbound_total_msat += htlc.amount_msat;
3155 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
3156 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3157 (0, 0)
3158 } else {
3159 let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
3160 (dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
3161 dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
3162 };
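// Illustrative numbers: at a 2_530 sat/kW dust buffer feerate on a non-anchors
// channel (HTLC-timeout ~663 WU, HTLC-success ~703 WU) this gives roughly
//   2_530 * 663 / 1000 = 1_677 and 2_530 * 703 / 1000 = 1_778 sats;
// anything under these (plus the respective dust limit below) can't be claimed
// economically on-chain and so counts toward our dust exposure instead.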
3163 let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
3164 if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
3165 let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
3166 if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
3167 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
3168 on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
3169 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
3173 let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
3174 if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
3175 let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
3176 if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
3177 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
3178 on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
3179 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
3183 let pending_value_to_self_msat =
3184 self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
3185 let pending_remote_value_msat =
3186 self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
3187 if pending_remote_value_msat < msg.amount_msat {
3188 return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
3191 // Check that the remote can afford to pay for this HTLC on-chain at the current
3192 // feerate_per_kw, while maintaining their channel reserve (as required by the spec).
3194 let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else {
3195 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3196 self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
3198 let anchor_outputs_value_msat = if !self.context.is_outbound() && self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3199 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
3200 } else {
3201 0
3202 };
3203 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(anchor_outputs_value_msat) < remote_commit_tx_fee_msat {
3204 return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
3206 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(remote_commit_tx_fee_msat).saturating_sub(anchor_outputs_value_msat) < self.context.holder_selected_channel_reserve_satoshis * 1000 {
3207 return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
3211 let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3212 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
3213 } else {
3214 0
3215 };
3216 if !self.context.is_outbound() {
3217 // `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
3218 // the spec because the fee spike buffer requirement doesn't exist on the receiver's
3219 // side, only on the sender's. Note that with anchor outputs we are no longer as
3220 // sensitive to fee spikes, so we don't need to account for them as aggressively.
3221 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3222 let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
3223 if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3224 remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
3226 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
3227 // Note that if the pending_forward_status is not updated here, then it's because we're already failing
3228 // the HTLC, i.e. its status is already set to failing.
3229 log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
3230 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
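// E.g. (illustrative, assuming `FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE` of 2):
// a non-anchors commitment fee of 1_500 sats incl. the buffer HTLC must remain
// affordable at 3_000 sats, leaving headroom for a ~2x feerate spike before the
// channel would become unusable.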
3233 // Check that they won't violate our local required channel reserve by adding this HTLC.
3234 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3235 let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
3236 if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat + anchor_outputs_value_msat {
3237 return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
3240 if self.context.next_counterparty_htlc_id != msg.htlc_id {
3241 return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
3243 if msg.cltv_expiry >= 500000000 {
3244 return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
3247 if self.context.channel_state.is_local_shutdown_sent() {
3248 if let PendingHTLCStatus::Forward(_) = pending_forward_status {
3249 panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
3253 // Now update local state:
3254 self.context.next_counterparty_htlc_id += 1;
3255 self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
3256 htlc_id: msg.htlc_id,
3257 amount_msat: msg.amount_msat,
3258 payment_hash: msg.payment_hash,
3259 cltv_expiry: msg.cltv_expiry,
3260 state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
3265 /// Marks an outbound HTLC for which we have received an update_fail/fulfill/malformed message.
3266 #[inline]
3267 fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
3268 assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage");
3269 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3270 if htlc.htlc_id == htlc_id {
3271 let outcome = match check_preimage {
3272 None => fail_reason.into(),
3273 Some(payment_preimage) => {
3274 let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array());
3275 if payment_hash != htlc.payment_hash {
3276 return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
3278 OutboundHTLCOutcome::Success(Some(payment_preimage))
3279 }
3280 };
3281 match htlc.state {
3282 OutboundHTLCState::LocalAnnounced(_) =>
3283 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
3284 OutboundHTLCState::Committed => {
3285 htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
3287 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
3288 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
3293 Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
3296 pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
3297 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3298 return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
3300 if self.context.channel_state.is_peer_disconnected() {
3301 return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
3304 self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
3307 pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
3308 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3309 return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
3311 if self.context.channel_state.is_peer_disconnected() {
3312 return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
3315 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
3316 Ok(())
3319 pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
3320 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3321 return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
3323 if self.context.channel_state.is_peer_disconnected() {
3324 return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
3327 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
3328 Ok(())
3331 pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
3332 where L::Target: Logger
3334 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3335 return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
3337 if self.context.channel_state.is_peer_disconnected() {
3338 return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
3340 if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
3341 return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
3344 let funding_script = self.context.get_funding_redeemscript();
3346 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
3348 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
3349 let commitment_txid = {
3350 let trusted_tx = commitment_stats.tx.trust();
3351 let bitcoin_tx = trusted_tx.built_transaction();
3352 let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
3354 log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
3355 log_bytes!(msg.signature.serialize_compact()[..]),
3356 log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
3357 log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), &self.context.channel_id());
3358 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
3359 return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
3363 let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();
3365 // If our counterparty updated the channel fee in this commitment transaction, check that
3366 // they can actually afford the new fee now.
3367 let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee {
3368 update_state == FeeUpdateState::RemoteAnnounced
3369 } else { false };
3370 if update_fee {
3371 debug_assert!(!self.context.is_outbound());
3372 let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
3373 if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
3374 return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
3377 #[cfg(any(test, fuzzing))]
3379 if self.context.is_outbound() {
3380 let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
3381 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
3382 if let Some(info) = projected_commit_tx_info {
3383 let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len()
3384 + self.context.holding_cell_htlc_updates.len();
3385 if info.total_pending_htlcs == total_pending_htlcs
3386 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
3387 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
3388 && info.feerate == self.context.feerate_per_kw {
3389 assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);
3395 if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
3396 return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
3399 // Up to LDK 0.0.115, HTLC information was required to be duplicated in the
3400 // `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
3401 // in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
3402 // outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for
3403 // backwards compatibility, we never use it in production. To provide test coverage, here,
3404 // we randomly decide (in test/fuzzing builds) to use the new vec sometimes.
3405 #[allow(unused_assignments, unused_mut)]
3406 let mut separate_nondust_htlc_sources = false;
3407 #[cfg(all(feature = "std", any(test, fuzzing)))] {
3408 use core::hash::{BuildHasher, Hasher};
3409 // Get a random value using the only std API to do so - the DefaultHasher
3410 let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
3411 separate_nondust_htlc_sources = rand_val % 2 == 0;
3414 let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
3415 let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
3416 for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
3417 if let Some(_) = htlc.transaction_output_index {
3418 let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
3419 self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, &self.context.channel_type,
3420 &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
3422 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &keys);
3423 let htlc_sighashtype = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
3424 let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
3425 log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
3426 log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.to_public_key().serialize()),
3427 encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), &self.context.channel_id());
3428 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key.to_public_key()) {
3429 return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
3431 if !separate_nondust_htlc_sources {
3432 htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
3433 }
3434 } else {
3435 htlcs_and_sigs.push((htlc, None, source_opt.take()));
3437 if separate_nondust_htlc_sources {
3438 if let Some(source) = source_opt.take() {
3439 nondust_htlc_sources.push(source);
3442 debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
3445 let holder_commitment_tx = HolderCommitmentTransaction::new(
3446 commitment_stats.tx,
3447 msg.signature,
3448 msg.htlc_signatures.clone(),
3449 &self.context.get_holder_pubkeys().funding_pubkey,
3450 self.context.counterparty_funding_pubkey()
3453 self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.outbound_htlc_preimages)
3454 .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
3456 // Update state now that we've passed all the can-fail calls...
3457 let mut need_commitment = false;
3458 if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee {
3459 if *update_state == FeeUpdateState::RemoteAnnounced {
3460 *update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
3461 need_commitment = true;
3465 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
3466 let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
3467 Some(forward_info.clone())
3468 } else { None };
3469 if let Some(forward_info) = new_forward {
3470 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
3471 &htlc.payment_hash, &self.context.channel_id);
3472 htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
3473 need_commitment = true;
3476 let mut claimed_htlcs = Vec::new();
3477 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3478 if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
3479 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
3480 &htlc.payment_hash, &self.context.channel_id);
3481 // Grab the preimage, if it exists, instead of cloning
3482 let mut reason = OutboundHTLCOutcome::Success(None);
3483 mem::swap(outcome, &mut reason);
3484 if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
3485 // If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
3486 // upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
3487 // have a `Success(None)` reason. In this case we could forget some HTLC
3488 // claims, but such an upgrade is unlikely and including claimed HTLCs here
3489 // fixes a bug which the user was exposed to on 0.0.104 when they started the
3490 // upgrade process.
3491 claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
3493 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
3494 need_commitment = true;
3498 self.context.latest_monitor_update_id += 1;
3499 let mut monitor_update = ChannelMonitorUpdate {
3500 update_id: self.context.latest_monitor_update_id,
3501 counterparty_node_id: Some(self.context.counterparty_node_id),
3502 updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
3503 commitment_tx: holder_commitment_tx,
3504 htlc_outputs: htlcs_and_sigs,
3505 claimed_htlcs,
3506 nondust_htlc_sources,
3510 self.context.cur_holder_commitment_transaction_number -= 1;
3511 self.context.expecting_peer_commitment_signed = false;
3512 // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
3513 // build_commitment_no_status_check() next which will reset this to RAAFirst.
3514 self.context.resend_order = RAACommitmentOrder::CommitmentFirst;
3516 if self.context.channel_state.is_monitor_update_in_progress() {
3517 // In case we initially failed monitor updating without requiring a response, we need
3518 // to make sure the RAA gets sent first.
3519 self.context.monitor_pending_revoke_and_ack = true;
3520 if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
3521 // If we were going to send a commitment_signed after the RAA, go ahead and do all
3522 // the corresponding HTLC status updates so that
3523 // get_last_commitment_update_for_send includes the right HTLCs.
3524 self.context.monitor_pending_commitment_signed = true;
3525 let mut additional_update = self.build_commitment_no_status_check(logger);
3526 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3527 // strictly increasing by one, so decrement it here.
3528 self.context.latest_monitor_update_id = monitor_update.update_id;
3529 monitor_update.updates.append(&mut additional_update.updates);
3531 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
3532 &self.context.channel_id);
3533 return Ok(self.push_ret_blockable_mon_update(monitor_update));
3536 let need_commitment_signed = if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
3537 // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
3538 // we'll send one right away when we get the revoke_and_ack when we
3539 // free_holding_cell_htlcs().
3540 let mut additional_update = self.build_commitment_no_status_check(logger);
3541 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3542 // strictly increasing by one, so decrement it here.
3543 self.context.latest_monitor_update_id = monitor_update.update_id;
3544 monitor_update.updates.append(&mut additional_update.updates);
3545 true
3546 } else { false };
3548 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
3549 &self.context.channel_id(), if need_commitment_signed { " our own commitment_signed and" } else { "" });
3550 self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
3551 return Ok(self.push_ret_blockable_mon_update(monitor_update));
3554 /// Public version of the below, checking relevant preconditions first.
3555 /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
3556 /// returns `(None, Vec::new())`.
3557 pub fn maybe_free_holding_cell_htlcs<F: Deref, L: Deref>(
3558 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3559 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3560 where F::Target: FeeEstimator, L::Target: Logger
3562 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && !self.context.channel_state.should_force_holding_cell() {
3563 self.free_holding_cell_htlcs(fee_estimator, logger)
3564 } else { (None, Vec::new()) }
3567 /// Frees any pending commitment updates in the holding cell, generating the relevant messages
3568 /// for our counterparty.
3569 fn free_holding_cell_htlcs<F: Deref, L: Deref>(
3570 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3571 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3572 where F::Target: FeeEstimator, L::Target: Logger
3574 assert!(!self.context.channel_state.is_monitor_update_in_progress());
3575 if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
3576 log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
3577 if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id());
3579 let mut monitor_update = ChannelMonitorUpdate {
3580 update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
3581 counterparty_node_id: Some(self.context.counterparty_node_id),
3582 updates: Vec::new(),
3585 let mut htlc_updates = Vec::new();
3586 mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
3587 let mut update_add_count = 0;
3588 let mut update_fulfill_count = 0;
3589 let mut update_fail_count = 0;
3590 let mut htlcs_to_fail = Vec::new();
3591 for htlc_update in htlc_updates.drain(..) {
3592 // Note that this *can* fail, though it should be due to rather-rare conditions on
3593 // fee races with adding too many outputs which push our total payments just over
3594 // the limit. In case it's less rare than I anticipate, we may want to revisit
3595 // handling this case better and maybe fulfilling some of the HTLCs while attempting
3596 // to rebalance channels.
3597 match &htlc_update {
3598 &HTLCUpdateAwaitingACK::AddHTLC {
3599 amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
3600 skimmed_fee_msat, blinding_point, ..
3601 } => {
3602 match self.send_htlc(
3603 amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(),
3604 false, skimmed_fee_msat, blinding_point, fee_estimator, logger
3606 Ok(_) => update_add_count += 1,
3607 Err(e) => {
3608 match e {
3609 ChannelError::Ignore(ref msg) => {
3610 log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", &payment_hash, msg, &self.context.channel_id());
3611 // If we fail to send here, then this HTLC should
3612 // be failed backwards. Failing to send here
3613 // indicates that this HTLC may keep being put back
3614 // into the holding cell without ever being
3615 // successfully forwarded/failed/fulfilled, causing
3616 // our counterparty to eventually close on us.
3617 htlcs_to_fail.push((source.clone(), *payment_hash));
3618 },
3619 _ => {
3620 panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
3626 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
3627 // If an HTLC claim was previously added to the holding cell (via
3628 // `get_update_fulfill_htlc`), then generating the claim message itself must
3629 // not fail - any in between attempts to claim the HTLC will have resulted
3630 // in it hitting the holding cell again and we cannot change the state of a
3631 // holding cell HTLC from fulfill to anything else.
3632 let mut additional_monitor_update =
3633 if let UpdateFulfillFetch::NewClaim { monitor_update, .. } =
3634 self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger)
3635 { monitor_update } else { unreachable!() };
3636 update_fulfill_count += 1;
3637 monitor_update.updates.append(&mut additional_monitor_update.updates);
3639 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
3640 match self.fail_htlc(htlc_id, err_packet.clone(), false, logger) {
3641 Ok(update_fail_msg_option) => {
3642 // If an HTLC failure was previously added to the holding cell (via
3643 // `queue_fail_htlc`) then generating the fail message itself must
3644 // not fail - we should never end up in a state where we double-fail
3645 // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
3646 // for a full revocation before failing.
3647 debug_assert!(update_fail_msg_option.is_some());
3648 update_fail_count += 1;
3649 },
3650 Err(e) => {
3651 if let ChannelError::Ignore(_) = e {}
3652 else {
3653 panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
3658 &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
3659 match self.fail_htlc(htlc_id, (failure_code, sha256_of_onion), false, logger) {
3660 Ok(update_fail_malformed_opt) => {
3661 debug_assert!(update_fail_malformed_opt.is_some()); // See above comment
3662 update_fail_count += 1;
3663 },
3664 Err(e) => {
3665 if let ChannelError::Ignore(_) = e {}
3666 else {
3667 panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
3674 if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && self.context.holding_cell_update_fee.is_none() {
3675 return (None, htlcs_to_fail);
3677 let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
3678 self.send_update_fee(feerate, false, fee_estimator, logger)
3683 let mut additional_update = self.build_commitment_no_status_check(logger);
3684 // build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_update_id
3685 // but we want them to be strictly increasing by one, so reset it here.
3686 self.context.latest_monitor_update_id = monitor_update.update_id;
3687 monitor_update.updates.append(&mut additional_update.updates);
3689 log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
3690 &self.context.channel_id(), if update_fee.is_some() { "a fee update, " } else { "" },
3691 update_add_count, update_fulfill_count, update_fail_count);
3693 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
3694 (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
3700 /// Handles receiving a remote's revoke_and_ack. Note that we may return a new
3701 /// commitment_signed message here in case we had pending outbound HTLCs to add which were
3702 /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
3703 /// generating an appropriate error *after* the channel state has been updated based on the
3704 /// revoke_and_ack message.
3705 pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK,
3706 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L, hold_mon_update: bool,
3707 ) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
3708 where F::Target: FeeEstimator, L::Target: Logger,
3710 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3711 return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
3713 if self.context.channel_state.is_peer_disconnected() {
3714 return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
3716 if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
3717 return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
3720 let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());
3722 if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
3723 if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
3724 return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
3728 if !self.context.channel_state.is_awaiting_remote_revoke() {
3729 // Our counterparty seems to have burned their coins to us (by revoking a state when we
3730 // haven't given them a new commitment transaction to broadcast). We should probably
3731 // take advantage of this by updating our channel monitor, sending them an error, and
3732 // waiting for them to broadcast their latest (now-revoked) claim. But, that would be a
3733 // lot of work, and there's some chance this is all a misunderstanding anyway.
3734 // We have to do *something*, though, since our signer may get mad at us for otherwise
3735 // jumping a remote commitment number, so best to just force-close and move on.
3736 return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
3739 #[cfg(any(test, fuzzing))]
3741 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
3742 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
3745 match &self.context.holder_signer {
3746 ChannelSignerType::Ecdsa(ecdsa) => {
3747 ecdsa.validate_counterparty_revocation(
3748 self.context.cur_counterparty_commitment_transaction_number + 1,
3750 ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
3752 // TODO (taproot|arik)
3757 self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
3758 .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
3759 self.context.latest_monitor_update_id += 1;
3760 let mut monitor_update = ChannelMonitorUpdate {
3761 update_id: self.context.latest_monitor_update_id,
3762 counterparty_node_id: Some(self.context.counterparty_node_id),
3763 updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
3764 idx: self.context.cur_counterparty_commitment_transaction_number + 1,
3765 secret: msg.per_commitment_secret,
3769 // Update state now that we've passed all the can-fail calls...
3770 // (note that we may still fail to generate the new commitment_signed message, but that's
3771 // OK, we step the channel here and *then* if the new generation fails we can fail the
3772 // channel based on that, but stepping stuff here should be safe either way.)
3773 self.context.channel_state.clear_awaiting_remote_revoke();
3774 self.context.sent_message_awaiting_response = None;
3775 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
3776 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
3777 self.context.cur_counterparty_commitment_transaction_number -= 1;
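// Illustrative note on the decrement above: commitment transaction numbers
// count *down* from INITIAL_COMMITMENT_NUMBER (2^48 - 1), so advancing to the
// next state is a subtraction. With made-up numbers:
//
//     let initial = (1u64 << 48) - 1;   // first commitment number
//     let current = initial - 1;        // after one revoke_and_ack
//     assert_eq!(initial - current, 1); // number of states revoked so far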
3779 if self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
3780 self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
3783 log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id());
3784 let mut to_forward_infos = Vec::new();
3785 let mut revoked_htlcs = Vec::new();
3786 let mut finalized_claimed_htlcs = Vec::new();
3787 let mut update_fail_htlcs = Vec::new();
3788 let mut update_fail_malformed_htlcs = Vec::new();
3789 let mut require_commitment = false;
3790 let mut value_to_self_msat_diff: i64 = 0;
3793 // Take references explicitly so that we can hold multiple references to self.context.
3794 let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
3795 let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;
3796 let expecting_peer_commitment_signed = &mut self.context.expecting_peer_commitment_signed;
3798 // We really shouldn't have two passes here, but retain gives a non-mutable ref (Rust bug)
3799 pending_inbound_htlcs.retain(|htlc| {
3800 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
3801 log_trace!(logger, " ...removing inbound LocalRemoved {}", &htlc.payment_hash);
3802 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
3803 value_to_self_msat_diff += htlc.amount_msat as i64;
3805 *expecting_peer_commitment_signed = true;
3809 pending_outbound_htlcs.retain(|htlc| {
3810 if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
3811 log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", &htlc.payment_hash);
3812 if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
3813 revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
3815 finalized_claimed_htlcs.push(htlc.source.clone());
3816 // They fulfilled, so we sent them money
3817 value_to_self_msat_diff -= htlc.amount_msat as i64;
3822 for htlc in pending_inbound_htlcs.iter_mut() {
3823 let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
3825 } else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
3829 let mut state = InboundHTLCState::Committed;
3830 mem::swap(&mut state, &mut htlc.state);
3832 if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
3833 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
3834 htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
3835 require_commitment = true;
3836 } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
3837 match forward_info {
3838 PendingHTLCStatus::Fail(fail_msg) => {
3839 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash);
3840 require_commitment = true;
3842 HTLCFailureMsg::Relay(msg) => {
3843 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
3844 update_fail_htlcs.push(msg)
3846 HTLCFailureMsg::Malformed(msg) => {
3847 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
3848 update_fail_malformed_htlcs.push(msg)
3852 PendingHTLCStatus::Forward(forward_info) => {
3853 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash);
3854 to_forward_infos.push((forward_info, htlc.htlc_id));
3855 htlc.state = InboundHTLCState::Committed;
3861 for htlc in pending_outbound_htlcs.iter_mut() {
3862 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
3863 log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", &htlc.payment_hash);
3864 htlc.state = OutboundHTLCState::Committed;
3865 *expecting_peer_commitment_signed = true;
3867 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
3868 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
3869 // Grab the preimage, if it exists, instead of cloning
3870 let mut reason = OutboundHTLCOutcome::Success(None);
3871 mem::swap(outcome, &mut reason);
3872 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
3873 require_commitment = true;
3877 self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;
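// Illustrative arithmetic for the balance update above (made-up numbers):
// fulfilled inbound HTLCs add to our balance while fulfilled outbound HTLCs
// subtract from it, so the net change is accumulated as a signed value and
// applied once:
//
//     let value_to_self_msat: u64 = 5_000_000;
//     let diff: i64 = 250_000 - 400_000; // +inbound fulfills, -outbound fulfills
//     let updated = (value_to_self_msat as i64 + diff) as u64;
//     assert_eq!(updated, 4_850_000);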
3879 if let Some((feerate, update_state)) = self.context.pending_update_fee {
3880 match update_state {
3881 FeeUpdateState::Outbound => {
3882 debug_assert!(self.context.is_outbound());
3883 log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
3884 self.context.feerate_per_kw = feerate;
3885 self.context.pending_update_fee = None;
3886 self.context.expecting_peer_commitment_signed = true;
3888 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); },
3889 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
3890 debug_assert!(!self.context.is_outbound());
3891 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
3892 require_commitment = true;
3893 self.context.feerate_per_kw = feerate;
3894 self.context.pending_update_fee = None;
3899 let release_monitor = self.context.blocked_monitor_updates.is_empty() && !hold_mon_update;
3900 let release_state_str =
3901 if hold_mon_update { "Holding" } else if release_monitor { "Releasing" } else { "Blocked" };
3902 macro_rules! return_with_htlcs_to_fail {
3903 ($htlcs_to_fail: expr) => {
3904 if !release_monitor {
3905 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
3906 update: monitor_update,
3908 return Ok(($htlcs_to_fail, None));
3910 return Ok(($htlcs_to_fail, Some(monitor_update)));
3915 if self.context.channel_state.is_monitor_update_in_progress() {
3916 // We can't actually generate a new commitment transaction (including by freeing holding
3917 // cells) while we can't update the monitor, so we just return what we have.
3918 if require_commitment {
3919 self.context.monitor_pending_commitment_signed = true;
3920 // When the monitor updating is restored we'll call
3921 // get_last_commitment_update_for_send(), which does not update state, but we're
3922 // definitely now awaiting a remote revoke before we can step forward any more, so set it here.
3924 let mut additional_update = self.build_commitment_no_status_check(logger);
3925 // build_commitment_no_status_check may bump latest_monitor_update_id but we want them to be
3926 // strictly increasing by one, so decrement it here.
3927 self.context.latest_monitor_update_id = monitor_update.update_id;
3928 monitor_update.updates.append(&mut additional_update.updates);
3930 self.context.monitor_pending_forwards.append(&mut to_forward_infos);
3931 self.context.monitor_pending_failures.append(&mut revoked_htlcs);
3932 self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
3933 log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", &self.context.channel_id());
3934 return_with_htlcs_to_fail!(Vec::new());
3937 match self.free_holding_cell_htlcs(fee_estimator, logger) {
3938 (Some(mut additional_update), htlcs_to_fail) => {
3939 // free_holding_cell_htlcs may bump latest_monitor_update_id multiple times but we want them to be
3940 // strictly increasing by one, so decrement it here.
3941 self.context.latest_monitor_update_id = monitor_update.update_id;
3942 monitor_update.updates.append(&mut additional_update.updates);
3944 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with holding cell HTLCs freed. {} monitor update.",
3945 &self.context.channel_id(), release_state_str);
3947 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3948 return_with_htlcs_to_fail!(htlcs_to_fail);
3950 (None, htlcs_to_fail) => {
3951 if require_commitment {
3952 let mut additional_update = self.build_commitment_no_status_check(logger);
3954 // build_commitment_no_status_check may bump latest_monitor_update_id but we want them to be
3955 // strictly increasing by one, so decrement it here.
3956 self.context.latest_monitor_update_id = monitor_update.update_id;
3957 monitor_update.updates.append(&mut additional_update.updates);
3959 log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed. {} monitor update.",
3960 &self.context.channel_id(),
3961 update_fail_htlcs.len() + update_fail_malformed_htlcs.len(),
3964 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3965 return_with_htlcs_to_fail!(htlcs_to_fail);
3967 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary. {} monitor update.",
3968 &self.context.channel_id(), release_state_str);
3970 self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3971 return_with_htlcs_to_fail!(htlcs_to_fail);
3977 /// Queues up an outbound update fee by placing it in the holding cell. You should call
3978 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
3979 /// commitment update.
3980 pub fn queue_update_fee<F: Deref, L: Deref>(&mut self, feerate_per_kw: u32,
3981 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
3982 where F::Target: FeeEstimator, L::Target: Logger
3984 let msg_opt = self.send_update_fee(feerate_per_kw, true, fee_estimator, logger);
3985 assert!(msg_opt.is_none(), "We forced holding cell?");
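// Illustrative caller-side sketch (hypothetical, simplified): the fee update
// queued above sits in the holding cell until the cell is freed, at which
// point the commitment update is actually generated and sent:
//
//     channel.queue_update_fee(new_feerate_per_kw, &fee_estimator, &logger);
//     // Later, once pending RAA/monitor work has cleared:
//     let _ = channel.maybe_free_holding_cell_htlcs(&fee_estimator, &logger);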
3988 /// Adds a pending fee update to this channel. See the doc for send_htlc for
3989 /// further details on when the return value may be `None`.
3990 /// If our balance is too low to cover the cost of the next commitment transaction at the
3991 /// new feerate, the update is cancelled.
3993 /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
3994 /// [`Channel`] if `force_holding_cell` is false.
3995 fn send_update_fee<F: Deref, L: Deref>(
3996 &mut self, feerate_per_kw: u32, mut force_holding_cell: bool,
3997 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3998 ) -> Option<msgs::UpdateFee>
3999 where F::Target: FeeEstimator, L::Target: Logger
4001 if !self.context.is_outbound() {
4002 panic!("Cannot send fee from inbound channel");
4004 if !self.context.is_usable() {
4005 panic!("Cannot update fee until channel is fully established and we haven't started shutting down");
4007 if !self.context.is_live() {
4008 panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
4011 // Before proposing a feerate update, check that we can actually afford the new fee.
4012 let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
4013 let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
4014 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
4015 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
4016 let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000;
4017 let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
4018 if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
4019 //TODO: auto-close after a number of failures?
4020 log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
4024 // Note that we evaluate the pending HTLC "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
4025 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
4026 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
4027 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
4028 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
4029 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
4032 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
4033 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
4037 if self.context.channel_state.is_awaiting_remote_revoke() || self.context.channel_state.is_monitor_update_in_progress() {
4038 force_holding_cell = true;
4041 if force_holding_cell {
4042 self.context.holding_cell_update_fee = Some(feerate_per_kw);
4046 debug_assert!(self.context.pending_update_fee.is_none());
4047 self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));
4049 Some(msgs::UpdateFee {
4050 channel_id: self.context.channel_id,
4055 /// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
4056 /// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be
4057 /// resent.
4058 /// No further message handling calls may be made until a channel_reestablish dance has
4059 /// completed.
4060 /// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately.
4061 pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger {
4062 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
4063 if self.context.channel_state.is_pre_funded_state() {
4067 if self.context.channel_state.is_peer_disconnected() {
4068 // While the below code should be idempotent, it's simpler to just return early, as
4069 // redundant disconnect events can fire, though they should be rare.
4073 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
4074 self.context.announcement_sigs_state = AnnouncementSigsState::NotSent;
4077 // Upon reconnect we have to start the closing_signed dance over, but shutdown messages
4078 // will be retransmitted.
4079 self.context.last_sent_closing_fee = None;
4080 self.context.pending_counterparty_closing_signed = None;
4081 self.context.closing_fee_limits = None;
4083 let mut inbound_drop_count = 0;
4084 self.context.pending_inbound_htlcs.retain(|htlc| {
4086 InboundHTLCState::RemoteAnnounced(_) => {
4087 // They sent us an update_add_htlc but we never got the commitment_signed.
4088 // We'll tell them what commitment_signed we're expecting next and they'll drop
4089 // this HTLC accordingly
4090 inbound_drop_count += 1;
4093 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
4094 // We received a commitment_signed updating this HTLC and (at least hopefully)
4095 // sent a revoke_and_ack (which we can re-transmit) and have heard nothing
4096 // in response to it yet, so don't touch it.
4099 InboundHTLCState::Committed => true,
4100 InboundHTLCState::LocalRemoved(_) => {
4101 // We (hopefully) sent a commitment_signed updating this HTLC (which we can
4102 // re-transmit if needed) and they may have even sent a revoke_and_ack back
4103 // (that we missed). Keep this around for now and if they tell us they missed
4104 // the commitment_signed we can re-transmit the update then.
4109 self.context.next_counterparty_htlc_id -= inbound_drop_count;
4111 if let Some((_, update_state)) = self.context.pending_update_fee {
4112 if update_state == FeeUpdateState::RemoteAnnounced {
4113 debug_assert!(!self.context.is_outbound());
4114 self.context.pending_update_fee = None;
4118 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
4119 if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
4120 // They sent us an update to remove this but haven't yet sent the corresponding
4121 // commitment_signed, we need to move it back to Committed and they can re-send
4122 // the update upon reconnection.
4123 htlc.state = OutboundHTLCState::Committed;
4127 self.context.sent_message_awaiting_response = None;
4129 self.context.channel_state.set_peer_disconnected();
4130 log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id());
4134 /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
4135 /// This must be called before we return the [`ChannelMonitorUpdate`] back to the
4136 /// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
4137 /// update completes (potentially immediately).
4138 /// The messages which were generated with the monitor update must *not* have been sent to the
4139 /// remote end, and must instead have been dropped. They will be regenerated when
4140 /// [`Self::monitor_updating_restored`] is called.
4142 /// [`ChannelManager`]: super::channelmanager::ChannelManager
4143 /// [`chain::Watch`]: crate::chain::Watch
4144 /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
4145 fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
4146 resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
4147 mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
4148 mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
4150 self.context.monitor_pending_revoke_and_ack |= resend_raa;
4151 self.context.monitor_pending_commitment_signed |= resend_commitment;
4152 self.context.monitor_pending_channel_ready |= resend_channel_ready;
4153 self.context.monitor_pending_forwards.append(&mut pending_forwards);
4154 self.context.monitor_pending_failures.append(&mut pending_fails);
4155 self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
4156 self.context.channel_state.set_monitor_update_in_progress();
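// Illustrative flow sketch (hypothetical caller, simplified): pausing and
// restoring brackets the window during which generated messages are held back
// rather than sent:
//
//     channel.monitor_updating_paused(true, false, false, Vec::new(), Vec::new(), Vec::new());
//     // ... the ChannelMonitorUpdate is persisted out-of-band ...
//     let updates = channel.monitor_updating_restored(
//         &logger, &node_signer, chain_hash, &user_config, best_block_height);
//     // updates.raa / updates.commitment_update are now safe to send.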
4159 /// Indicates that the latest ChannelMonitor update has been committed by the client
4160 /// successfully and we should restore normal operation. Returns messages which should be sent
4161 /// to the remote side.
4162 pub fn monitor_updating_restored<L: Deref, NS: Deref>(
4163 &mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash,
4164 user_config: &UserConfig, best_block_height: u32
4165 ) -> MonitorRestoreUpdates
4168 NS::Target: NodeSigner
4170 assert!(self.context.channel_state.is_monitor_update_in_progress());
4171 self.context.channel_state.clear_monitor_update_in_progress();
4173 // If we're past (or at) the AwaitingChannelReady stage on an outbound channel, try to
4174 // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
4175 // first received the funding_signed.
4176 let mut funding_broadcastable =
4177 if self.context.is_outbound() &&
4178 (matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH)) ||
4179 matches!(self.context.channel_state, ChannelState::ChannelReady(_)))
4181 self.context.funding_transaction.take()
4183 // That said, if the funding transaction is already confirmed (i.e. we're active with a
4184 // minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
4185 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && self.context.minimum_depth != Some(0) {
4186 funding_broadcastable = None;
4189 // We will never broadcast the funding transaction when we're in MonitorUpdateInProgress
4190 // (and we assume the user never directly broadcasts the funding transaction and waits for
4191 // us to do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
4192 // * an inbound channel that failed to persist the monitor on funding_created and we got
4193 // the funding transaction confirmed before the monitor was persisted, or
4194 // * a 0-conf channel and intended to send the channel_ready before any broadcast at all.
4195 let channel_ready = if self.context.monitor_pending_channel_ready {
4196 assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
4197 "Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
4198 self.context.monitor_pending_channel_ready = false;
4199 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4200 Some(msgs::ChannelReady {
4201 channel_id: self.context.channel_id(),
4202 next_per_commitment_point,
4203 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4207 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger);
4209 let mut accepted_htlcs = Vec::new();
4210 mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards);
4211 let mut failed_htlcs = Vec::new();
4212 mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
4213 let mut finalized_claimed_htlcs = Vec::new();
4214 mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);
4216 if self.context.channel_state.is_peer_disconnected() {
4217 self.context.monitor_pending_revoke_and_ack = false;
4218 self.context.monitor_pending_commitment_signed = false;
4219 return MonitorRestoreUpdates {
4220 raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
4221 accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
4225 let raa = if self.context.monitor_pending_revoke_and_ack {
4226 Some(self.get_last_revoke_and_ack())
4228 let commitment_update = if self.context.monitor_pending_commitment_signed {
4229 self.get_last_commitment_update_for_send(logger).ok()
4231 if commitment_update.is_some() {
4232 self.mark_awaiting_response();
4235 self.context.monitor_pending_revoke_and_ack = false;
4236 self.context.monitor_pending_commitment_signed = false;
4237 let order = self.context.resend_order.clone();
4238 log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
4239 &self.context.channel_id(), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
4240 if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
4241 match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
4242 MonitorRestoreUpdates {
4243 raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
4247 pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
4248 where F::Target: FeeEstimator, L::Target: Logger
4250 if self.context.is_outbound() {
4251 return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
4253 if self.context.channel_state.is_peer_disconnected() {
4254 return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
4256 Channel::<SP>::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
4258 self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
4259 self.context.update_time_counter += 1;
4260 // Check that we won't be pushed over our dust exposure limit by the feerate increase.
4261 if !self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
4262 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
4263 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
4264 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
4265 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
4266 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
4267 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
4268 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
4269 msg.feerate_per_kw, holder_tx_dust_exposure)));
4271 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
4272 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
4273 msg.feerate_per_kw, counterparty_tx_dust_exposure)));
4279 /// Indicates that the signer may have some signatures for us, so we should retry if we're
4280 /// blocked.
4281 #[cfg(async_signing)]
4282 pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger {
4283 let commitment_update = if self.context.signer_pending_commitment_update {
4284 self.get_last_commitment_update_for_send(logger).ok()
4286 let funding_signed = if self.context.signer_pending_funding && !self.context.is_outbound() {
4287 self.context.get_funding_signed_msg(logger).1
4289 let channel_ready = if funding_signed.is_some() {
4290 self.check_get_channel_ready(0)
4293 log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed and {} channel_ready",
4294 if commitment_update.is_some() { "a" } else { "no" },
4295 if funding_signed.is_some() { "a" } else { "no" },
4296 if channel_ready.is_some() { "a" } else { "no" });
4298 SignerResumeUpdates {
4305 fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
4306 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4307 let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
4308 msgs::RevokeAndACK {
4309 channel_id: self.context.channel_id,
4310 per_commitment_secret,
4311 next_per_commitment_point,
4313 next_local_nonce: None,
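// Illustrative note on the indices above: since commitment numbers count
// down, `cur_holder_commitment_transaction_number + 2` names a state two
// steps *older* than the one the new point is handed out for. With made-up
// numbers:
//
//     let initial = (1u64 << 48) - 1;
//     let cur = initial - 5;   // current holder commitment number
//     let revealed = cur + 2;  // state whose secret is released above
//     assert!(revealed > cur); // larger number == older state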
4317 /// Gets the last commitment update for immediate sending to our peer.
4318 fn get_last_commitment_update_for_send<L: Deref>(&mut self, logger: &L) -> Result<msgs::CommitmentUpdate, ()> where L::Target: Logger {
4319 let mut update_add_htlcs = Vec::new();
4320 let mut update_fulfill_htlcs = Vec::new();
4321 let mut update_fail_htlcs = Vec::new();
4322 let mut update_fail_malformed_htlcs = Vec::new();
4324 for htlc in self.context.pending_outbound_htlcs.iter() {
4325 if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
4326 update_add_htlcs.push(msgs::UpdateAddHTLC {
4327 channel_id: self.context.channel_id(),
4328 htlc_id: htlc.htlc_id,
4329 amount_msat: htlc.amount_msat,
4330 payment_hash: htlc.payment_hash,
4331 cltv_expiry: htlc.cltv_expiry,
4332 onion_routing_packet: (**onion_packet).clone(),
4333 skimmed_fee_msat: htlc.skimmed_fee_msat,
4334 blinding_point: htlc.blinding_point,
4339 for htlc in self.context.pending_inbound_htlcs.iter() {
4340 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
4342 &InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
4343 update_fail_htlcs.push(msgs::UpdateFailHTLC {
4344 channel_id: self.context.channel_id(),
4345 htlc_id: htlc.htlc_id,
4346 reason: err_packet.clone()
4349 &InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
4350 update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
4351 channel_id: self.context.channel_id(),
4352 htlc_id: htlc.htlc_id,
4353 sha256_of_onion: sha256_of_onion.clone(),
4354 failure_code: failure_code.clone(),
4357 &InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
4358 update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
4359 channel_id: self.context.channel_id(),
4360 htlc_id: htlc.htlc_id,
4361 payment_preimage: payment_preimage.clone(),
4368 let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() {
4369 Some(msgs::UpdateFee {
4370 channel_id: self.context.channel_id(),
4371 feerate_per_kw: self.context.pending_update_fee.unwrap().0,
4375 log_trace!(logger, "Regenerating latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
4376 &self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" },
4377 update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
4378 let commitment_signed = if let Ok(update) = self.send_commitment_no_state_update(logger).map(|(cu, _)| cu) {
4379 if self.context.signer_pending_commitment_update {
4380 log_trace!(logger, "Commitment update generated: clearing signer_pending_commitment_update");
4381 self.context.signer_pending_commitment_update = false;
4385 #[cfg(not(async_signing))] {
4386 panic!("Failed to get signature for new commitment state");
4388 #[cfg(async_signing)] {
4389 if !self.context.signer_pending_commitment_update {
4390 log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
4391 self.context.signer_pending_commitment_update = true;
4396 Ok(msgs::CommitmentUpdate {
4397 update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
4402 /// Gets the `Shutdown` message we should send our peer on reconnect, if any.
4403 pub fn get_outbound_shutdown(&self) -> Option<msgs::Shutdown> {
4404 if self.context.channel_state.is_local_shutdown_sent() {
4405 assert!(self.context.shutdown_scriptpubkey.is_some());
4406 Some(msgs::Shutdown {
4407 channel_id: self.context.channel_id,
4408 scriptpubkey: self.get_closing_scriptpubkey(),
4413 /// May panic if some calls other than message-handling calls (which will all Err immediately)
4414 /// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
4416 /// Some links printed in log lines are included here to check them during build (when run with
4417 /// `cargo doc --document-private-items`):
4418 /// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
4419 /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
4420 pub fn channel_reestablish<L: Deref, NS: Deref>(
4421 &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
4422 chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock
4423 ) -> Result<ReestablishResponses, ChannelError>
4426 NS::Target: NodeSigner
4428 if !self.context.channel_state.is_peer_disconnected() {
4429 // While BOLT 2 doesn't indicate explicitly we should error this channel here, it
4430 // almost certainly indicates we are going to end up out-of-sync in some way, so we
4431 // just close here instead of trying to recover.
4432 return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
4435 if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
4436 msg.next_local_commitment_number == 0 {
4437 return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned()));
4440 let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
4441 if msg.next_remote_commitment_number > 0 {
4442 let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
4443 let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
4444 .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
4445 if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
4446 return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
4448 if msg.next_remote_commitment_number > our_commitment_transaction {
4449 macro_rules! log_and_panic {
4450 ($err_msg: expr) => {
4451 log_error!(logger, $err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4452 panic!($err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4455 log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
4456 This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
4457 More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
4458 If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
4459 ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
4460 ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
4461 Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
4462 See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
4466 // Before we change the state of the channel, we check if the peer is sending a very old
4467 // commitment transaction number; if so, we send a warning message.
4468 if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
4469 return Err(ChannelError::Warn(format!(
4470 "Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)",
4471 msg.next_remote_commitment_number,
4472 our_commitment_transaction
4476 // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
4477 // remaining cases either succeed or ErrorMessage-fail).
4478 self.context.channel_state.clear_peer_disconnected();
4479 self.context.sent_message_awaiting_response = None;
4481 let shutdown_msg = self.get_outbound_shutdown();
4483 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger);
4485 if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(_)) {
4486 // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
4487 if !self.context.channel_state.is_our_channel_ready() ||
4488 self.context.channel_state.is_monitor_update_in_progress() {
4489 if msg.next_remote_commitment_number != 0 {
4490 return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
4492 // Short circuit the whole handler as there is nothing we can resend them
4493 return Ok(ReestablishResponses {
4494 channel_ready: None,
4495 raa: None, commitment_update: None,
4496 order: RAACommitmentOrder::CommitmentFirst,
4497 shutdown_msg, announcement_sigs,
4501 // We have OurChannelReady set!
4502 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4503 return Ok(ReestablishResponses {
4504 channel_ready: Some(msgs::ChannelReady {
4505 channel_id: self.context.channel_id(),
4506 next_per_commitment_point,
4507 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4509 raa: None, commitment_update: None,
4510 order: RAACommitmentOrder::CommitmentFirst,
4511 shutdown_msg, announcement_sigs,
4515 let required_revoke = if msg.next_remote_commitment_number == our_commitment_transaction {
4516 // Remote isn't waiting on any RevokeAndACK from us!
4517 // Note that if we need to repeat our ChannelReady we'll do that in the next if block.
4519 } else if msg.next_remote_commitment_number + 1 == our_commitment_transaction {
4520 if self.context.channel_state.is_monitor_update_in_progress() {
4521 self.context.monitor_pending_revoke_and_ack = true;
4524 Some(self.get_last_revoke_and_ack())
4527 debug_assert!(false, "All values should have been handled in the four cases above");
4528 return Err(ChannelError::Close(format!(
4529 "Peer attempted to reestablish channel expecting a future local commitment transaction: {} (received) vs {} (expected)",
4530 msg.next_remote_commitment_number,
4531 our_commitment_transaction
4535 // We increment cur_counterparty_commitment_transaction_number only upon receipt of
4536 // revoke_and_ack, not on sending commitment_signed, so we add one if we have
4537 // AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
4538 // the corresponding revoke_and_ack back yet.
4539 let is_awaiting_remote_revoke = self.context.channel_state.is_awaiting_remote_revoke();
4540 if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
4541 self.mark_awaiting_response();
4543 let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };
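// Illustrative arithmetic for the bookkeeping above (hypothetical state): if
// four counterparty states have been revoked and a commitment_signed is still
// awaiting its revoke_and_ack, the peer should next expect commitment number 5:
//
//     let initial = (1u64 << 48) - 1;
//     let cur_counterparty = initial - 4; // internal countdown value
//     let awaiting_remote_revoke = true;
//     let next = initial - cur_counterparty + if awaiting_remote_revoke { 1 } else { 0 };
//     assert_eq!(next, 5);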
4545 let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 {
4546 // We should never have to worry about MonitorUpdateInProgress resending ChannelReady
4547 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4548 Some(msgs::ChannelReady {
4549 channel_id: self.context.channel_id(),
4550 next_per_commitment_point,
4551 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4555 if msg.next_local_commitment_number == next_counterparty_commitment_number {
4556 if required_revoke.is_some() {
4557 log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id());
4559 log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id());
4562 Ok(ReestablishResponses {
4563 channel_ready, shutdown_msg, announcement_sigs,
4564 raa: required_revoke,
4565 commitment_update: None,
4566 order: self.context.resend_order.clone(),
4568 } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
4569 if required_revoke.is_some() {
4570 log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id());
4572 log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id());
4575 if self.context.channel_state.is_monitor_update_in_progress() {
4576 self.context.monitor_pending_commitment_signed = true;
4577 Ok(ReestablishResponses {
4578 channel_ready, shutdown_msg, announcement_sigs,
4579 commitment_update: None, raa: None,
4580 order: self.context.resend_order.clone(),
4583 Ok(ReestablishResponses {
4584 channel_ready, shutdown_msg, announcement_sigs,
4585 raa: required_revoke,
4586 commitment_update: self.get_last_commitment_update_for_send(logger).ok(),
4587 order: self.context.resend_order.clone(),
4590 } else if msg.next_local_commitment_number < next_counterparty_commitment_number {
4591 Err(ChannelError::Close(format!(
4592 "Peer attempted to reestablish channel with a very old remote commitment transaction: {} (received) vs {} (expected)",
4593 msg.next_local_commitment_number,
4594 next_counterparty_commitment_number,
4597 Err(ChannelError::Close(format!(
4598 "Peer attempted to reestablish channel with a future remote commitment transaction: {} (received) vs {} (expected)",
4599 msg.next_local_commitment_number,
4600 next_counterparty_commitment_number,
4605 /// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole
4606 /// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart,
4607 /// at which point they will be recalculated.
4608 fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>)
4609 -> (u64, u64)
4610 where F::Target: FeeEstimator
4612 if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); }
4614 // Propose a range from our current ChannelCloseMinimum feerate to our NonAnchorChannelFee
4615 // feerate plus our force_close_avoidance_max_fee_satoshis.
4616 // If we fail to come to consensus, we'll have to force-close.
4617 let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::ChannelCloseMinimum);
4618 // Use NonAnchorChannelFee because this should be an estimate for a channel close
4619 // that we don't expect to need fee bumping
4620 let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
4621 let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() };
4623 // The spec requires that (when the channel does not have anchors) we only send absolute
4624 // channel fees no greater than the absolute channel fee on the current commitment
4625 // transaction. It's unclear *which* commitment transaction this refers to, and there isn't
4626 // a very good reason to apply such a limit in any case. We don't bother doing so, risking
4627 // some force-closure by old nodes, but we wanted to close the channel anyway.
4629 if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw {
4630 let min_feerate = if self.context.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) };
4631 proposed_feerate = cmp::max(proposed_feerate, min_feerate);
4632 proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate);
4635 // Note that technically we could end up with a lower minimum fee if one side's balance is
4636 // below our dust limit, causing the output to disappear. We don't bother handling this
4637 // case, however, as this should only happen if a channel is closed before any (material)
4638 // payments have been made on it. This may cause slight fee overpayment and/or failure to
4639 // come to consensus with our counterparty on appropriate fees, however it should be a
4640 // relatively rare case. We can revisit this later, though note that in order to determine
4641 // if the funder's output is dust we have to know the absolute fee we're going to use.
4642 let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
4643 let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
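// Illustrative arithmetic for the fee computation above (made-up numbers):
// a feerate is expressed in sat per 1000 weight units, so:
//
//     let proposed_feerate: u64 = 1_000; // sat per kW
//     let tx_weight: u64 = 672;          // example closing tx weight
//     let fee_sat = proposed_feerate * tx_weight / 1000;
//     assert_eq!(fee_sat, 672);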
4644 let proposed_max_total_fee_satoshis = if self.context.is_outbound() {
4645 // We always add force_close_avoidance_max_fee_satoshis to our normal
4646 // feerate-calculated fee, but allow the max to be overridden if we're using a
4647 // target feerate-calculated fee.
4648 cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.context.config.options.force_close_avoidance_max_fee_satoshis,
4649 proposed_max_feerate as u64 * tx_weight / 1000)
4651 self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000
4654 self.context.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis));
4655 self.context.closing_fee_limits.clone().unwrap()
4658 /// Returns true if we're ready to commence the closing_signed negotiation phase. This is true
4659 /// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At
4660 /// this point if we're the funder we should send the initial closing_signed, and in any case
4661 /// shutdown should complete within a reasonable timeframe.
4662 fn closing_negotiation_ready(&self) -> bool {
4663 self.context.closing_negotiation_ready()
4666 /// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
4667 /// an Err if no progress is being made and the channel should be force-closed instead.
4668 /// Should be called on a one-minute timer.
4669 pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
4670 if self.closing_negotiation_ready() {
4671 if self.context.closing_signed_in_flight {
4672 return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
4674 self.context.closing_signed_in_flight = true;
4680 pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
4681 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
4682 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
4683 where F::Target: FeeEstimator, L::Target: Logger
4685 // If we're waiting on a monitor persistence, that implies we're also waiting to send some
4686 // message to our counterparty (probably a `revoke_and_ack`). In such a case, we shouldn't
4687 // initiate `closing_signed` negotiation until we're clear of all pending messages. Note
4688 // that closing_negotiation_ready checks this case (as well as a few others).
4689 if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
4690 return Ok((None, None, None));
4693 if !self.context.is_outbound() {
4694 if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() {
4695 return self.closing_signed(fee_estimator, &msg);
4697 return Ok((None, None, None));
4700 // If we're waiting on a counterparty `commitment_signed` to clear some updates from our
4701 // local commitment transaction, we can't yet initiate `closing_signed` negotiation.
4702 if self.context.expecting_peer_commitment_signed {
4703 return Ok((None, None, None));
4706 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
4708 assert!(self.context.shutdown_scriptpubkey.is_some());
4709 let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false);
4710 log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)",
4711 our_min_fee, our_max_fee, total_fee_satoshis);
4713 match &self.context.holder_signer {
4714 ChannelSignerType::Ecdsa(ecdsa) => {
4716 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
4717 .map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;
4719 self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
4720 Ok((Some(msgs::ClosingSigned {
4721 channel_id: self.context.channel_id,
4722 fee_satoshis: total_fee_satoshis,
4724 fee_range: Some(msgs::ClosingSignedFeeRange {
4725 min_fee_satoshis: our_min_fee,
4726 max_fee_satoshis: our_max_fee,
4730 // TODO (taproot|arik)
4736 // Marks a channel as waiting for a response from the counterparty. If it's not received
4737 // [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] after sending our own to them, then we'll attempt
4738 // a reconnection.
4739 fn mark_awaiting_response(&mut self) {
4740 self.context.sent_message_awaiting_response = Some(0);
4743 /// Determines whether we should disconnect the counterparty due to not receiving a response
4744 /// within our expected timeframe.
4746 /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
4747 pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool {
4748 let ticks_elapsed = if let Some(ticks_elapsed) = self.context.sent_message_awaiting_response.as_mut() {
4751 // Don't disconnect when we're not waiting on a response.
4754 *ticks_elapsed += 1;
4755 *ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
	pub fn shutdown(
		&mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
	) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
	{
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
		}
		if self.context.channel_state.is_pre_funded_state() {
			// Spec says we should fail the connection, not the channel, but that's nonsense, there
			// are plenty of reasons you may want to fail a channel pre-funding, and spec says you
			// can do that via error message without getting a connection fail anyway...
			return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
		}
		for htlc in self.context.pending_inbound_htlcs.iter() {
			if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
				return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
			}
		}
		assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));

		if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
			return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_hex_string())));
		}

		if self.context.counterparty_shutdown_scriptpubkey.is_some() {
			if Some(&msg.scriptpubkey) != self.context.counterparty_shutdown_scriptpubkey.as_ref() {
				return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_hex_string())));
			}
		} else {
			self.context.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
		}

		// If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
		// immediately after the commitment dance, but we can send a Shutdown because we won't send
		// any further commitment updates after we set LocalShutdownSent.
		let send_shutdown = !self.context.channel_state.is_local_shutdown_sent();

		let update_shutdown_script = match self.context.shutdown_scriptpubkey {
			Some(_) => false,
			None => {
				assert!(send_shutdown);
				let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
					Ok(scriptpubkey) => scriptpubkey,
					Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
				};
				if !shutdown_scriptpubkey.is_compatible(their_features) {
					return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
				}
				self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
				true
			},
		};

		// From here on out, we may not fail!

		self.context.channel_state.set_remote_shutdown_sent();
		self.context.update_time_counter += 1;

		let monitor_update = if update_shutdown_script {
			self.context.latest_monitor_update_id += 1;
			let monitor_update = ChannelMonitorUpdate {
				update_id: self.context.latest_monitor_update_id,
				counterparty_node_id: Some(self.context.counterparty_node_id),
				updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
					scriptpubkey: self.get_closing_scriptpubkey(),
				}],
			};
			self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
			self.push_ret_blockable_mon_update(monitor_update)
		} else { None };
		let shutdown = if send_shutdown {
			Some(msgs::Shutdown {
				channel_id: self.context.channel_id,
				scriptpubkey: self.get_closing_scriptpubkey(),
			})
		} else { None };

		// We can't send our shutdown until we've committed all of our pending HTLCs, but the
		// remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
		// cell HTLCs and return them to fail the payment.
		self.context.holding_cell_update_fee = None;
		let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
		self.context.holding_cell_htlc_updates.retain(|htlc_update| {
			match htlc_update {
				&HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
					dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
					false
				},
				_ => true
			}
		});

		self.context.channel_state.set_local_shutdown_sent();
		self.context.update_time_counter += 1;

		Ok((shutdown, monitor_update, dropped_outbound_htlcs))
	}
	fn build_signed_closing_transaction(&self, closing_tx: &ClosingTransaction, counterparty_sig: &Signature, sig: &Signature) -> Transaction {
		let mut tx = closing_tx.trust().built_transaction().clone();

		tx.input[0].witness.push(Vec::new()); // First is the multisig dummy

		let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize();
		let counterparty_funding_key = self.context.counterparty_funding_pubkey().serialize();
		let mut holder_sig = sig.serialize_der().to_vec();
		holder_sig.push(EcdsaSighashType::All as u8);
		let mut cp_sig = counterparty_sig.serialize_der().to_vec();
		cp_sig.push(EcdsaSighashType::All as u8);
		if funding_key[..] < counterparty_funding_key[..] {
			tx.input[0].witness.push(holder_sig);
			tx.input[0].witness.push(cp_sig);
		} else {
			tx.input[0].witness.push(cp_sig);
			tx.input[0].witness.push(holder_sig);
		}

		tx.input[0].witness.push(self.context.get_funding_redeemscript().into_bytes());
		tx
	}
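	// A sketch of the ordering rule used in `build_signed_closing_transaction` above: the 2-of-2
	// funding redeemscript sorts the serialized funding pubkeys lexicographically, and the
	// signatures in the witness must appear in the same order as those pubkeys. The 33-byte
	// serializations below are illustrative placeholders, not real keys.
	#[cfg(test)]
	#[allow(unused)]
	fn closing_witness_sig_order_sketch() {
		let holder_key = [2u8; 33];
		let mut counterparty_key = [2u8; 33];
		counterparty_key[32] = 3;
		// The holder's signature is pushed first iff the holder's pubkey sorts first.
		assert!(holder_key[..] < counterparty_key[..]);
	}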
	pub fn closing_signed<F: Deref>(
		&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned)
		-> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
		where F::Target: FeeEstimator
	{
		if !self.context.channel_state.is_both_sides_shutdown() {
			return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
		}
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
		}
		if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
			return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
		}
		if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
			return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
		}

		if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() {
			return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
		}

		if self.context.channel_state.is_monitor_update_in_progress() {
			self.context.pending_counterparty_closing_signed = Some(msg.clone());
			return Ok((None, None, None));
		}

		let funding_redeemscript = self.context.get_funding_redeemscript();
		let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
		if used_total_fee != msg.fee_satoshis {
			return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
		}
		let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);

		match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
			Ok(_) => {},
			Err(_) => {
				// The remote end may have decided to revoke their output due to inconsistent dust
				// limits, so check for that case by re-checking the signature here.
				closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0;
				let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
				secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.context.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
			},
		};

		for outp in closing_tx.trust().built_transaction().output.iter() {
			if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
				return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
			}
		}

		assert!(self.context.shutdown_scriptpubkey.is_some());
		if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
			if last_fee == msg.fee_satoshis {
				let shutdown_result = ShutdownResult {
					monitor_update: None,
					dropped_outbound_htlcs: Vec::new(),
					unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
					channel_id: self.context.channel_id,
					counterparty_node_id: self.context.counterparty_node_id,
				};
				let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
				self.context.channel_state = ChannelState::ShutdownComplete;
				self.context.update_time_counter += 1;
				return Ok((None, Some(tx), Some(shutdown_result)));
			}
		}
		let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);

		macro_rules! propose_fee {
			($new_fee: expr) => {
				let (closing_tx, used_fee) = if $new_fee == msg.fee_satoshis {
					(closing_tx, $new_fee)
				} else {
					self.build_closing_transaction($new_fee, false)
				};

				return match &self.context.holder_signer {
					ChannelSignerType::Ecdsa(ecdsa) => {
						let sig = ecdsa
							.sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
							.map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
						let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
							let shutdown_result = ShutdownResult {
								monitor_update: None,
								dropped_outbound_htlcs: Vec::new(),
								unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
								channel_id: self.context.channel_id,
								counterparty_node_id: self.context.counterparty_node_id,
							};
							self.context.channel_state = ChannelState::ShutdownComplete;
							self.context.update_time_counter += 1;
							let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
							(Some(tx), Some(shutdown_result))
						} else {
							(None, None)
						};

						self.context.last_sent_closing_fee = Some((used_fee, sig.clone()));
						Ok((Some(msgs::ClosingSigned {
							channel_id: self.context.channel_id,
							fee_satoshis: used_fee,
							signature: sig,
							fee_range: Some(msgs::ClosingSignedFeeRange {
								min_fee_satoshis: our_min_fee,
								max_fee_satoshis: our_max_fee,
							}),
						}), signed_tx, shutdown_result))
					},
					// TODO (taproot|arik)
					#[cfg(taproot)]
					_ => todo!()
				}
			}
		}

		if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range {
			if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis {
				return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
			}
			if max_fee_satoshis < our_min_fee {
				return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee)));
			}
			if min_fee_satoshis > our_max_fee {
				return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee)));
			}

			if !self.context.is_outbound() {
				// They have to pay, so pick the highest fee in the overlapping range.
				// We should never set an upper bound aside from their full balance
				debug_assert_eq!(our_max_fee, self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000);
				propose_fee!(cmp::min(max_fee_satoshis, our_max_fee));
			} else {
				if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee {
					return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
						msg.fee_satoshis, our_min_fee, our_max_fee)));
				}
				// The proposed fee is in our acceptable range, accept it and broadcast!
				propose_fee!(msg.fee_satoshis);
			}
		} else {
			// Old fee style negotiation. We don't bother to enforce whether they are complying
			// with the "making progress" requirements, we just comply and hope for the best.
			if let Some((last_fee, _)) = self.context.last_sent_closing_fee {
				if msg.fee_satoshis > last_fee {
					if msg.fee_satoshis < our_max_fee {
						propose_fee!(msg.fee_satoshis);
					} else if last_fee < our_max_fee {
						propose_fee!(our_max_fee);
					} else {
						return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
					}
				} else {
					if msg.fee_satoshis > our_min_fee {
						propose_fee!(msg.fee_satoshis);
					} else if last_fee > our_min_fee {
						propose_fee!(our_min_fee);
					} else {
						return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
					}
				}
			} else {
				if msg.fee_satoshis < our_min_fee {
					propose_fee!(our_min_fee);
				} else if msg.fee_satoshis > our_max_fee {
					propose_fee!(our_max_fee);
				} else {
					propose_fee!(msg.fee_satoshis);
				}
			}
		}
	}
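	// A worked example (with made-up values) of the range-based negotiation in `closing_signed`
	// above: if the counterparty's acceptable fee range is [1_000, 5_000] sat and ours is
	// [2_000, 8_000] sat, the ranges overlap, and as the non-funder (the counterparty pays the
	// fee) we propose the highest fee in the overlap, min(their_max, our_max) = 5_000 sat.
	#[cfg(test)]
	#[allow(unused)]
	fn closing_fee_overlap_sketch() {
		let (their_min_fee, their_max_fee) = (1_000u64, 5_000);
		let (our_min_fee, our_max_fee) = (2_000u64, 8_000);
		assert!(their_max_fee >= our_min_fee && their_min_fee <= our_max_fee); // ranges overlap
		assert_eq!(cmp::min(their_max_fee, our_max_fee), 5_000);
	}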
	fn internal_htlc_satisfies_config(
		&self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig,
	) -> Result<(), (&'static str, u16)> {
		let fee = amt_to_forward.checked_mul(config.forwarding_fee_proportional_millionths as u64)
			.and_then(|prop_fee| (prop_fee / 1000000).checked_add(config.forwarding_fee_base_msat as u64));
		if fee.is_none() || htlc.amount_msat < fee.unwrap() ||
			(htlc.amount_msat - fee.unwrap()) < amt_to_forward {
			return Err((
				"Prior hop has deviated from specified fees parameters or origin node has obsolete ones",
				0x1000 | 12, // fee_insufficient
			));
		}
		if (htlc.cltv_expiry as u64) < outgoing_cltv_value as u64 + config.cltv_expiry_delta as u64 {
			return Err((
				"Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
				0x1000 | 13, // incorrect_cltv_expiry
			));
		}
		Ok(())
	}
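	// A worked example of the fee check in `internal_htlc_satisfies_config` above, using
	// illustrative (non-default) config values: forwarding 1_000_000 msat with a 1_000 msat base
	// fee and a 100 parts-per-million proportional fee requires the incoming HTLC to carry at
	// least 1_000_000 + 1_000 + 100 = 1_001_100 msat.
	#[cfg(test)]
	#[allow(unused)]
	fn forwarding_fee_sketch() {
		let forwarding_fee_base_msat: u64 = 1_000;
		let forwarding_fee_proportional_millionths: u64 = 100;
		let amt_to_forward: u64 = 1_000_000;
		let fee = amt_to_forward.checked_mul(forwarding_fee_proportional_millionths)
			.and_then(|prop_fee| (prop_fee / 1_000_000).checked_add(forwarding_fee_base_msat));
		assert_eq!(fee, Some(1_100));
		// An incoming HTLC of exactly amt_to_forward + fee passes the check.
		let incoming_amount_msat = amt_to_forward + fee.unwrap();
		assert!(incoming_amount_msat - fee.unwrap() >= amt_to_forward);
	}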
	/// Determines whether the parameters of an incoming HTLC to be forwarded satisfy the channel's
	/// [`ChannelConfig`]. This first looks at the channel's current [`ChannelConfig`], and if
	/// unsuccessful, falls back to the previous one if one exists.
	pub fn htlc_satisfies_config(
		&self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32,
	) -> Result<(), (&'static str, u16)> {
		self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.context.config())
			.or_else(|err| {
				if let Some(prev_config) = self.context.prev_config() {
					self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &prev_config)
				} else {
					Err(err)
				}
			})
	}
	pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
		self.context.cur_holder_commitment_transaction_number + 1
	}

	pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
		self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state.is_awaiting_remote_revoke() { 1 } else { 0 }
	}

	pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
		self.context.cur_counterparty_commitment_transaction_number + 2
	}

	#[cfg(test)]
	pub fn get_signer(&self) -> &ChannelSignerType<SP> {
		&self.context.holder_signer
	}

	#[cfg(test)]
	pub fn get_value_stat(&self) -> ChannelValueStat {
		ChannelValueStat {
			value_to_self_msat: self.context.value_to_self_msat,
			channel_value_msat: self.context.channel_value_satoshis * 1000,
			channel_reserve_msat: self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000,
			pending_outbound_htlcs_amount_msat: self.context.pending_outbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
			pending_inbound_htlcs_amount_msat: self.context.pending_inbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
			holding_cell_outbound_amount_msat: {
				let mut res = 0;
				for h in self.context.holding_cell_htlc_updates.iter() {
					match h {
						&HTLCUpdateAwaitingACK::AddHTLC{amount_msat, .. } => {
							res += amount_msat;
						},
						_ => {}
					}
				}
				res
			},
			counterparty_max_htlc_value_in_flight_msat: self.context.counterparty_max_htlc_value_in_flight_msat,
			counterparty_dust_limit_msat: self.context.counterparty_dust_limit_satoshis * 1000,
		}
	}

	/// Returns true if this channel has been marked as awaiting a monitor update to move forward.
	/// Allowed in any state (including after shutdown)
	pub fn is_awaiting_monitor_update(&self) -> bool {
		self.context.channel_state.is_monitor_update_in_progress()
	}

	/// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
	pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 {
		if self.context.blocked_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
		self.context.blocked_monitor_updates[0].update.update_id - 1
	}
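	// A small worked example of the ID arithmetic in `get_latest_unblocked_monitor_update_id`
	// above, with made-up update IDs: if updates 7 and 8 are queued as blocked, the latest
	// released-and-in-flight update must be 7 - 1 = 6.
	#[cfg(test)]
	#[allow(unused)]
	fn unblocked_monitor_update_id_sketch() {
		let blocked_update_ids = [7u64, 8];
		assert_eq!(blocked_update_ids[0] - 1, 6);
	}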
	/// Returns the next blocked monitor update, if one exists, and a bool which indicates a
	/// further blocked monitor update exists after the next.
	pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(ChannelMonitorUpdate, bool)> {
		if self.context.blocked_monitor_updates.is_empty() { return None; }
		Some((self.context.blocked_monitor_updates.remove(0).update,
			!self.context.blocked_monitor_updates.is_empty()))
	}

	/// Pushes a new monitor update into our monitor update queue, returning it if it should be
	/// immediately given to the user for persisting or `None` if it should be held as blocked.
	fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
	-> Option<ChannelMonitorUpdate> {
		let release_monitor = self.context.blocked_monitor_updates.is_empty();
		if !release_monitor {
			self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
				update,
			});
			None
		} else {
			Some(update)
		}
	}

	pub fn blocked_monitor_updates_pending(&self) -> usize {
		self.context.blocked_monitor_updates.len()
	}

	/// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
	/// If the channel is outbound, this implies we have not yet broadcasted the funding
	/// transaction. If the channel is inbound, this implies simply that the channel has not
	/// advanced state.
	pub fn is_awaiting_initial_mon_persist(&self) -> bool {
		if !self.is_awaiting_monitor_update() { return false; }
		if matches!(
			self.context.channel_state, ChannelState::AwaitingChannelReady(flags)
			if (flags & !(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY | FundedStateFlags::PEER_DISCONNECTED | FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS | AwaitingChannelReadyFlags::WAITING_FOR_BATCH)).is_empty()
		) {
			// If we're not a 0conf channel, we'll be waiting on a monitor update with only
			// AwaitingChannelReady set, though our peer could have sent their channel_ready.
			debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0);
			return true;
		}
		if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
			self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
			// If we're a 0-conf channel, we'll move beyond AwaitingChannelReady immediately even while
			// waiting for the initial monitor persistence. Thus, we check if our commitment
			// transaction numbers have both been iterated only exactly once (for the
			// funding_signed), and we're awaiting monitor update.
			//
			// If we got here, we shouldn't have yet broadcasted the funding transaction (as the
			// only way to get an awaiting-monitor-update state during initial funding is if the
			// initial monitor persistence is still pending).
			//
			// Because deciding we're awaiting initial broadcast spuriously could result in
			// funds-loss (as we don't have a monitor, but have the funding transaction confirmed),
			// we hard-assert here, even in production builds.
			if self.context.is_outbound() { assert!(self.context.funding_transaction.is_some()); }
			assert!(self.context.monitor_pending_channel_ready);
			assert_eq!(self.context.latest_monitor_update_id, 0);
			return true;
		}
		false
	}

	/// Returns true if our channel_ready has been sent
	pub fn is_our_channel_ready(&self) -> bool {
		matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY)) ||
			matches!(self.context.channel_state, ChannelState::ChannelReady(_))
	}

	/// Returns true if our peer has either initiated or agreed to shut down the channel.
	pub fn received_shutdown(&self) -> bool {
		self.context.channel_state.is_remote_shutdown_sent()
	}

	/// Returns true if we either initiated or agreed to shut down the channel.
	pub fn sent_shutdown(&self) -> bool {
		self.context.channel_state.is_local_shutdown_sent()
	}

	/// Returns true if this channel is fully shut down. True here implies that no further actions
	/// may/will be taken on this channel, and thus this object should be freed. Any future changes
	/// will be handled appropriately by the chain monitor.
	pub fn is_shutdown(&self) -> bool {
		matches!(self.context.channel_state, ChannelState::ShutdownComplete)
	}

	pub fn channel_update_status(&self) -> ChannelUpdateStatus {
		self.context.channel_update_status
	}

	pub fn set_channel_update_status(&mut self, status: ChannelUpdateStatus) {
		self.context.update_time_counter += 1;
		self.context.channel_update_status = status;
	}

	fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
		// Called:
		//  * always when a new block/transactions are confirmed with the new height
		//  * when funding is signed with a height of 0
		if self.context.funding_tx_confirmation_height == 0 && self.context.minimum_depth != Some(0) {
			return None;
		}

		let funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
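		// E.g. a funding transaction confirmed at height 100 has, at a current height of 105,
		// 105 - 100 + 1 = 6 confirmations, while a non-positive result means it has been
		// reorged back out of the chain.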
		if funding_tx_confirmations <= 0 {
			self.context.funding_tx_confirmation_height = 0;
		}

		if funding_tx_confirmations < self.context.minimum_depth.unwrap_or(0) as i64 {
			return None;
		}

		// If we're still pending the signature on a funding transaction, then we're not ready to send a
		// channel_ready yet.
		if self.context.signer_pending_funding {
			return None;
		}

		// Note that we don't include ChannelState::WaitingForBatch as we don't want to send
		// channel_ready until the entire batch is ready.
		let need_commitment_update = if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if (f & !FundedStateFlags::ALL).is_empty()) {
			self.context.channel_state.set_our_channel_ready();
			true
		} else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f & !FundedStateFlags::ALL == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY) {
			self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
			self.context.update_time_counter += 1;
			true
		} else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f & !FundedStateFlags::ALL == AwaitingChannelReadyFlags::OUR_CHANNEL_READY) {
			// We got a reorg but not enough to trigger a force close, just ignore.
			false
		} else {
			if self.context.funding_tx_confirmation_height != 0 &&
				self.context.channel_state < ChannelState::ChannelReady(ChannelReadyFlags::new())
			{
				// We should never see a funding transaction on-chain until we've received
				// funding_signed (if we're an outbound channel), or seen funding_generated (if we're
				// an inbound channel - before that we have no known funding TXID). The fuzzer,
				// however, may do this and we shouldn't treat it as a bug.
				#[cfg(not(fuzzing))]
				panic!("Started confirming a channel in a state pre-AwaitingChannelReady: {}.\n\
					Do NOT broadcast a funding transaction manually - let LDK do it for you!",
					self.context.channel_state.to_u32());
			}
			// We got a reorg but not enough to trigger a force close, just ignore.
			false
		};

		if need_commitment_update {
			if !self.context.channel_state.is_monitor_update_in_progress() {
				if !self.context.channel_state.is_peer_disconnected() {
					let next_per_commitment_point =
						self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
					return Some(msgs::ChannelReady {
						channel_id: self.context.channel_id,
						next_per_commitment_point,
						short_channel_id_alias: Some(self.context.outbound_scid_alias),
					});
				}
			} else {
				self.context.monitor_pending_channel_ready = true;
			}
		}
		None
	}

	/// When a transaction is confirmed, we check whether it is or spends the funding transaction.
	/// In the first case, we store the confirmation height and calculate the short channel id.
	/// In the second, we simply return an Err indicating we need to be force-closed now.
	pub fn transactions_confirmed<NS: Deref, L: Deref>(
		&mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData,
		chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L
	) -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason>
	where
		NS::Target: NodeSigner,
		L::Target: Logger
	{
		let mut msgs = (None, None);
		if let Some(funding_txo) = self.context.get_funding_txo() {
			for &(index_in_block, tx) in txdata.iter() {
				// Check if the transaction is the expected funding transaction, and if it is,
				// check that it pays the right amount to the right script.
				if self.context.funding_tx_confirmation_height == 0 {
					if tx.txid() == funding_txo.txid {
						let txo_idx = funding_txo.index as usize;
						if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_v0_p2wsh() ||
								tx.output[txo_idx].value != self.context.channel_value_satoshis {
							if self.context.is_outbound() {
								// If we generated the funding transaction and it doesn't match what it
								// should, the client is really broken and we should just panic and
								// tell them off. That said, because hash collisions happen with high
								// probability in fuzzing mode, if we're fuzzing we just close the
								// channel and move on.
								#[cfg(not(fuzzing))]
								panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
							}
							self.context.update_time_counter += 1;
							let err_reason = "funding tx had wrong script/value or output index";
							return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() });
						} else {
							if self.context.is_outbound() {
								if !tx.is_coin_base() {
									for input in tx.input.iter() {
										if input.witness.is_empty() {
											// We generated a malleable funding transaction, implying we've
											// just exposed ourselves to funds loss to our counterparty.
											#[cfg(not(fuzzing))]
											panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
										}
									}
								}
							}
							self.context.funding_tx_confirmation_height = height;
							self.context.funding_tx_confirmed_in = Some(*block_hash);
							self.context.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
								Ok(scid) => Some(scid),
								Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
							};
						}
					}
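					// Note: `scid_from_parts` packs the confirmation height, the transaction's
					// index within its block, and the funding output index into one u64 (roughly
					// `height << 40 | tx_index << 16 | vout`), hence the bounds described in the
					// panic message above.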
					// If this is a coinbase transaction and not a 0-conf channel
					// we should update our min_depth to 100 to handle coinbase maturity
					if tx.is_coin_base() &&
							self.context.minimum_depth.unwrap_or(0) > 0 &&
							self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
						self.context.minimum_depth = Some(COINBASE_MATURITY);
					}
				}
				// If we allow 1-conf funding, we may need to check for channel_ready here and
				// send it immediately instead of waiting for a best_block_updated call (which
				// may have already happened for this block).
				if let Some(channel_ready) = self.check_get_channel_ready(height) {
					log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
					let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger);
					msgs = (Some(channel_ready), announcement_sigs);
				}
				for inp in tx.input.iter() {
					if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
						log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, &self.context.channel_id());
						return Err(ClosureReason::CommitmentTxConfirmed);
					}
				}
			}
		}
		Ok(msgs)
	}
	/// When a new block is connected, we check the height of the block against outbound holding
	/// cell HTLCs in case we need to give up on them prematurely and time them out. Everything
	/// else (e.g. commitment transaction broadcasts, HTLC transaction broadcasting, etc) is
	/// handled by the ChannelMonitor.
	///
	/// If we return Err, the channel may have been closed, at which point the standard
	/// requirements apply - no calls may be made except those explicitly stated to be allowed
	/// post-shutdown.
	///
	/// May return some HTLCs (and their payment_hash) which have timed out and should be failed
	/// backwards.
	pub fn best_block_updated<NS: Deref, L: Deref>(
		&mut self, height: u32, highest_header_time: u32, chain_hash: ChainHash,
		node_signer: &NS, user_config: &UserConfig, logger: &L
	) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
	where
		NS::Target: NodeSigner,
		L::Target: Logger
	{
		self.do_best_block_updated(height, highest_header_time, Some((chain_hash, node_signer, user_config)), logger)
	}

	fn do_best_block_updated<NS: Deref, L: Deref>(
		&mut self, height: u32, highest_header_time: u32,
		chain_node_signer: Option<(ChainHash, &NS, &UserConfig)>, logger: &L
	) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
	where
		NS::Target: NodeSigner,
		L::Target: Logger
	{
		let mut timed_out_htlcs = Vec::new();
		// This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
		// forward an HTLC when our counterparty should almost certainly just fail it for expiring
		// ~now.
		let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
		self.context.holding_cell_htlc_updates.retain(|htlc_update| {
			match htlc_update {
				&HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
					if *cltv_expiry <= unforwarded_htlc_cltv_limit {
						timed_out_htlcs.push((source.clone(), payment_hash.clone()));
						false
					} else { true }
				},
				_ => true
			}
		});

		self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);

		if let Some(channel_ready) = self.check_get_channel_ready(height) {
			let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
				self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
			} else { None };
			log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
			return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
		}

		if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
			self.context.channel_state.is_our_channel_ready() {
			let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
			if self.context.funding_tx_confirmation_height == 0 {
				// Note that check_get_channel_ready may reset funding_tx_confirmation_height to
				// zero if it has been reorged out, however in either case, our state flags
				// indicate we've already sent a channel_ready
				funding_tx_confirmations = 0;
			}

			// If we've sent channel_ready (or have both sent and received channel_ready), and
			// the funding transaction has become unconfirmed,
			// close the channel and hope we can get the latest state on chain (because presumably
			// the funding transaction is at least still in the mempool of most nodes).
			//
			// Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
			// 0-conf channel, but not doing so may lead to the
			// `ChannelManager::short_to_chan_info` map being inconsistent, so we currently have
			// to.
			if funding_tx_confirmations == 0 && self.context.funding_tx_confirmed_in.is_some() {
				let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
					self.context.minimum_depth.unwrap(), funding_tx_confirmations);
				return Err(ClosureReason::ProcessingError { err: err_reason });
			}
		} else if !self.context.is_outbound() && self.context.funding_tx_confirmed_in.is_none() &&
				height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
			log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id);
			// If funding_tx_confirmed_in is unset, the channel must not be active
			assert!(self.context.channel_state <= ChannelState::ChannelReady(ChannelReadyFlags::new()));
			assert!(!self.context.channel_state.is_our_channel_ready());
			return Err(ClosureReason::FundingTimedOut);
		}

		let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
			self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
		} else { None };
		Ok((None, timed_out_htlcs, announcement_sigs))
	}
	/// Indicates the funding transaction is no longer confirmed in the main chain. This may
	/// force-close the channel, but may also indicate a harmless reorganization of a block or two
	/// before the channel has reached channel_ready and we can just wait for more blocks.
	pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
		if self.context.funding_tx_confirmation_height != 0 {
			// We handle the funding disconnection by calling best_block_updated with a height one
			// below where our funding was connected, implying a reorg back to conf_height - 1.
			let reorg_height = self.context.funding_tx_confirmation_height - 1;
			// We use the time field to bump the current time we set on channel updates if it's
			// larger. If we don't know that time has moved forward, we can just set it to the last
			// time we saw and it will be ignored.
			let best_time = self.context.update_time_counter;
			match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&dyn NodeSigner, &UserConfig)>, logger) {
				Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
					assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?");
					assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
					assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?");
					Ok(())
				},
				Err(e) => Err(e)
			}
		} else {
			// We never learned about the funding confirmation anyway, just ignore
			Ok(())
		}
	}
	// Methods to get unprompted messages to send to the remote end (or where we already returned
	// something in the handler for the message that prompted this message):

	/// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
	/// announceable and available for use (have exchanged [`ChannelReady`] messages in both
	/// directions). Should be used for both broadcasted announcements and in response to an
	/// AnnouncementSignatures message from the remote peer.
	///
	/// Will only fail if we're not in a state where channel_announcement may be sent (including
	/// closing).
	///
	/// This will only return ChannelError::Ignore upon failure.
	///
	/// [`ChannelReady`]: crate::ln::msgs::ChannelReady
	fn get_channel_announcement<NS: Deref>(
		&self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
	) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
		if !self.context.config.announced_channel {
			return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
		}
		if !self.context.is_usable() {
			return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
		}

		let short_channel_id = self.context.get_short_channel_id()
			.ok_or(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel has not been confirmed yet".to_owned()))?;
		let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
			.map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
		let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
		let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();

		let msg = msgs::UnsignedChannelAnnouncement {
			features: channelmanager::provided_channel_features(&user_config),
			chain_hash,
			short_channel_id,
			node_id_1: if were_node_one { node_id } else { counterparty_node_id },
			node_id_2: if were_node_one { counterparty_node_id } else { node_id },
			bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }),
			bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }),
			excess_data: Vec::new(),
		};

		Ok(msg)
	}
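	// A sketch of the `were_node_one` ordering used in `get_channel_announcement` above: the
	// announcement always lists the lexicographically smaller serialized node id as `node_id_1`,
	// so both peers build an identical unsigned announcement. The 33-byte serializations below
	// are illustrative placeholders, not real keys.
	#[cfg(test)]
	#[allow(unused)]
	fn announcement_node_ordering_sketch() {
		let our_node_id = [2u8; 33];
		let mut counterparty_node_id = [2u8; 33];
		counterparty_node_id[32] = 3;
		let were_node_one = our_node_id[..] < counterparty_node_id[..];
		assert!(were_node_one); // we'd be node_id_1 here, regardless of who funded the channel
	}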
	fn get_announcement_sigs<NS: Deref, L: Deref>(
		&mut self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
		best_block_height: u32, logger: &L
	) -> Option<msgs::AnnouncementSignatures>
	where
		NS::Target: NodeSigner,
		L::Target: Logger
	{
		if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
			return None;
		}

		if !self.context.is_usable() {
			return None;
		}

		if self.context.channel_state.is_peer_disconnected() {
			log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
			return None;
		}

		if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent {
			return None;
		}

		log_trace!(logger, "Creating an announcement_signatures message for channel {}", &self.context.channel_id());
		let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
			Ok(a) => a,
			Err(e) => {
				log_trace!(logger, "{:?}", e);
				return None;
			}
		};
		let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) {
			Err(_) => {
				log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!");
				return None;
			},
			Ok(v) => v
		};
		match &self.context.holder_signer {
			ChannelSignerType::Ecdsa(ecdsa) => {
				let our_bitcoin_sig = match ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) {
					Err(_) => {
						log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!");
						return None;
					},
					Ok(v) => v
				};
				let short_channel_id = match self.context.get_short_channel_id() {
					Some(scid) => scid,
					None => return None,
				};

				self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;

				Some(msgs::AnnouncementSignatures {
					channel_id: self.context.channel_id(),
					short_channel_id,
					node_signature: our_node_sig,
					bitcoin_signature: our_bitcoin_sig,
				})
			},
			// TODO (taproot|arik)
			#[cfg(taproot)]
			_ => todo!()
		}
	}
	/// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are
	/// available.
	fn sign_channel_announcement<NS: Deref>(
		&self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement
	) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
		if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs {
			let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
				.map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?);
			let were_node_one = announcement.node_id_1 == our_node_key;

			let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement))
				.map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?;
			match &self.context.holder_signer {
				ChannelSignerType::Ecdsa(ecdsa) => {
					let our_bitcoin_sig = ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx)
						.map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
					Ok(msgs::ChannelAnnouncement {
						node_signature_1: if were_node_one { our_node_sig } else { their_node_sig },
						node_signature_2: if were_node_one { their_node_sig } else { our_node_sig },
						bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig },
						bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
						contents: announcement,
					})
				},
				// TODO (taproot|arik)
				#[cfg(taproot)]
				_ => todo!()
			}
		} else {
			Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
		}
	}
	/// Processes an incoming announcement_signatures message, providing a fully-signed
	/// channel_announcement message which we can broadcast and storing our counterparty's
	/// signatures for later reconstruction/rebroadcast of the channel_announcement.
	pub fn announcement_signatures<NS: Deref>(
		&mut self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32,
		msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
	) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
		let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;

		let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);

		if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
			return Err(ChannelError::Close(format!(
				"Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
				&announcement, self.context.get_counterparty_node_id())));
		}
		if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
			return Err(ChannelError::Close(format!(
				"Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
				&announcement, self.context.counterparty_funding_pubkey())));
		}

		self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature));
		if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
			return Err(ChannelError::Ignore(
				"Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned()));
		}

		self.sign_channel_announcement(node_signer, announcement)
	}
	/// Gets a signed channel_announcement for this channel, if we previously received an
	/// announcement_signatures from our counterparty.
	pub fn get_signed_channel_announcement<NS: Deref>(
		&self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32, user_config: &UserConfig
	) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
		if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
			return None;
		}
		let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
			Ok(res) => res,
			Err(_) => return None,
		};
		match self.sign_channel_announcement(node_signer, announcement) {
			Ok(res) => Some(res),
			Err(_) => None,
		}
	}

	/// May panic if called on a channel that wasn't immediately-previously
	/// self.remove_uncommitted_htlcs_and_mark_paused()'d
	pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
		assert!(self.context.channel_state.is_peer_disconnected());
		assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
		// Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
		// current to_remote balances. However, it no longer has any use, and thus is now simply
		// set to a dummy (but valid, as required by the spec) public key.
		// fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey"
		// branches, but we unwrap it below, so we arbitrarily select a dummy pubkey which is both
		// valid, and valid in fuzzing mode's arbitrary validity criteria:
		let mut pk = [2; 33]; pk[1] = 0xff;
		let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
		let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
			let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
			log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), &self.context.channel_id());
			remote_last_secret
		} else {
			log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", &self.context.channel_id());
			[0; 32]
		};
		self.mark_awaiting_response();
		msgs::ChannelReestablish {
			channel_id: self.context.channel_id(),
			// The protocol has two different commitment number concepts - the "commitment
			// transaction number", which starts from 0 and counts up, and the "revocation key
			// index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
			// commitment transaction numbers by the index which will be used to reveal the
			// revocation key for that commitment transaction, which means we have to convert them
			// to protocol-level commitment numbers here...

			// next_local_commitment_number is the next commitment_signed number we expect to
			// receive (indicating if they need to resend one that we missed).
			next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
			// We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
			// receive, however we track it by the next commitment number for a remote transaction
			// (which is one further, as they always revoke previous commitment transaction, not
			// the one we send) so we have to decrement by 1. Note that if
			// cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
			// dropped this channel on disconnect as it hasn't yet reached AwaitingChannelReady so we can't
			// overflow here.
			next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
			your_last_per_commitment_secret: remote_last_secret,
			my_current_per_commitment_point: dummy_pubkey,
			// TODO(dual_funding): If we've sent `commitment_signed` for an interactive transaction
			// construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
			// txid of that interactive transaction, else we MUST NOT set it.
			next_funding_txid: None,
		}
	}
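	// A worked example of the commitment-number conversion in `get_channel_reestablish` above:
	// `INITIAL_COMMITMENT_NUMBER` is 2^48 - 1 and our internal counters count *down* from it, so
	// with an illustrative internal holder counter of `INITIAL_COMMITMENT_NUMBER - 2` (two
	// commitments exchanged) the protocol-level `next_local_commitment_number` we send is 2.
	#[cfg(test)]
	#[allow(unused)]
	fn reestablish_commitment_number_sketch() {
		let cur_holder_commitment_transaction_number = INITIAL_COMMITMENT_NUMBER - 2;
		assert_eq!(INITIAL_COMMITMENT_NUMBER - cur_holder_commitment_transaction_number, 2);
	}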
	// Send stuff to our remote peers:

	/// Queues up an outbound HTLC to send by placing it in the holding cell. You should call
	/// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
	/// commitment update.
	///
	/// `Err`s will only be [`ChannelError::Ignore`].
	pub fn queue_add_htlc<F: Deref, L: Deref>(
		&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
		onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
		blinding_point: Option<PublicKey>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
	) -> Result<(), ChannelError>
	where F::Target: FeeEstimator, L::Target: Logger
	{
		self
			.send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true,
				skimmed_fee_msat, blinding_point, fee_estimator, logger)
			.map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
			.map_err(|err| {
				if let ChannelError::Ignore(_) = err { /* fine */ }
				else { debug_assert!(false, "Queueing cannot trigger channel failure"); }
				err
			})
	}
	/// Adds a pending outbound HTLC to this channel. Note that you probably want
	/// [`Self::send_htlc_and_commit`] instead, as you'll usually want both messages at once.
	///
	/// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on
	/// the wire:
	/// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
	///   wouldn't be able to determine what they actually ACK'ed if we have two sets of updates
	///   awaiting ACK.
	/// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as
	///   we may not yet have sent the previous commitment update messages and will need to
	///   regenerate them.
	///
	/// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods
	/// on this [`Channel`] if `force_holding_cell` is false.
	///
	/// `Err`s will only be [`ChannelError::Ignore`].
	fn send_htlc<F: Deref, L: Deref>(
		&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
		onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool,
		skimmed_fee_msat: Option<u64>, blinding_point: Option<PublicKey>,
		fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
	) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
	where F::Target: FeeEstimator, L::Target: Logger
	{
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
			self.context.channel_state.is_local_shutdown_sent() ||
			self.context.channel_state.is_remote_shutdown_sent()
		{
			return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
		}
		let channel_total_msat = self.context.channel_value_satoshis * 1000;
		if amount_msat > channel_total_msat {
			return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat)));
		}

		if amount_msat == 0 {
			return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
		}

		let available_balances = self.context.get_available_balances(fee_estimator);
		if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
			return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
				available_balances.next_outbound_htlc_minimum_msat)));
		}

		if amount_msat > available_balances.next_outbound_htlc_limit_msat {
			return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
				available_balances.next_outbound_htlc_limit_msat)));
		}

		if self.context.channel_state.is_peer_disconnected() {
			// Note that this should never really happen: being !is_live() on receipt of an
			// incoming HTLC for relay will result in us rejecting the HTLC, and we won't allow
			// the user to send directly into a !is_live() channel. However, if we
			// disconnected during the time the previous hop was doing the commitment dance we may
			// end up getting here after the forwarding delay. In any case, returning an
			// IgnoreError will get ChannelManager to do the right thing and fail backwards now.
			return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
		}

		let need_holding_cell = self.context.channel_state.should_force_holding_cell();
		log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}",
			payment_hash, amount_msat,
			if force_holding_cell { "into holding cell" }
			else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
			else { "to peer" });

		if need_holding_cell {
			force_holding_cell = true;
		}

		// Now update local state:
		if force_holding_cell {
			self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
				amount_msat,
				payment_hash,
				cltv_expiry,
				source,
				onion_routing_packet,
				skimmed_fee_msat,
				blinding_point,
			});
			return Ok(None);
		}

		self.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
			htlc_id: self.context.next_holder_htlc_id,
			amount_msat,
			payment_hash: payment_hash.clone(),
			cltv_expiry,
			state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
			source,
			blinding_point,
			skimmed_fee_msat,
		});

		let res = msgs::UpdateAddHTLC {
			channel_id: self.context.channel_id,
			htlc_id: self.context.next_holder_htlc_id,
			amount_msat,
			payment_hash,
			cltv_expiry,
			onion_routing_packet,
			skimmed_fee_msat,
			blinding_point,
		};
		self.context.next_holder_htlc_id += 1;

		Ok(Some(res))
	}
	fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
		log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
		// We can upgrade the status of some HTLCs that are waiting on a commitment, even if we
		// fail to generate this, we still are at least at a position where upgrading their status
		// is acceptable.
		for htlc in self.context.pending_inbound_htlcs.iter_mut() {
			let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state {
				Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
			} else { None };
			if let Some(state) = new_state {
				log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
				htlc.state = state;
			}
		}
		for htlc in self.context.pending_outbound_htlcs.iter_mut() {
			if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
				log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
				// Grab the preimage, if it exists, instead of cloning
				let mut reason = OutboundHTLCOutcome::Success(None);
				mem::swap(outcome, &mut reason);
				htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
			}
		}
		if let Some((feerate, update_state)) = self.context.pending_update_fee {
			if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
				debug_assert!(!self.context.is_outbound());
				log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
				self.context.feerate_per_kw = feerate;
				self.context.pending_update_fee = None;
			}
		}
		self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst;

		let (mut htlcs_ref, counterparty_commitment_tx) =
			self.build_commitment_no_state_update(logger);
		let counterparty_commitment_txid = counterparty_commitment_tx.trust().txid();
		let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
			htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();

		if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent {
			self.context.announcement_sigs_state = AnnouncementSigsState::Committed;
		}

		self.context.latest_monitor_update_id += 1;
		let monitor_update = ChannelMonitorUpdate {
			update_id: self.context.latest_monitor_update_id,
			counterparty_node_id: Some(self.context.counterparty_node_id),
			updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
				commitment_txid: counterparty_commitment_txid,
				htlc_outputs: htlcs.clone(),
				commitment_number: self.context.cur_counterparty_commitment_transaction_number,
				their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap(),
				feerate_per_kw: Some(counterparty_commitment_tx.feerate_per_kw()),
				to_broadcaster_value_sat: Some(counterparty_commitment_tx.to_broadcaster_value_sat()),
				to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()),
			}]
		};
		self.context.channel_state.set_awaiting_remote_revoke();
		monitor_update
	}
	fn build_commitment_no_state_update<L: Deref>(&self, logger: &L)
	-> (Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>, CommitmentTransaction)
	where L::Target: Logger
	{
		let counterparty_keys = self.context.build_remote_transaction_keys();
		let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
		let counterparty_commitment_tx = commitment_stats.tx;

		#[cfg(any(test, fuzzing))]
		{
			if !self.context.is_outbound() {
				let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
				*self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
				if let Some(info) = projected_commit_tx_info {
					let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len();
					if info.total_pending_htlcs == total_pending_htlcs
						&& info.next_holder_htlc_id == self.context.next_holder_htlc_id
						&& info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
						&& info.feerate == self.context.feerate_per_kw {
							let actual_fee = commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.get_channel_type());
							assert_eq!(actual_fee, info.fee);
					}
				}
			}
		}

		(commitment_stats.htlcs_included, counterparty_commitment_tx)
	}
5988 /// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
5989 /// generation when we shouldn't change HTLC/channel state.
5990 fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
5991 // Get the fee tests from `build_commitment_no_state_update`
5992 #[cfg(any(test, fuzzing))]
5993 self.build_commitment_no_state_update(logger);
5995 let counterparty_keys = self.context.build_remote_transaction_keys();
5996 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
5997 let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
5999 match &self.context.holder_signer {
6000 ChannelSignerType::Ecdsa(ecdsa) => {
6001 let (signature, htlc_signatures);
6004 let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
6005 for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
6009 let res = ecdsa.sign_counterparty_commitment(
6010 &commitment_stats.tx,
6011 commitment_stats.inbound_htlc_preimages,
6012 commitment_stats.outbound_htlc_preimages,
6013 &self.context.secp_ctx,
6014 ).map_err(|_| ChannelError::Ignore("Failed to get signatures for new commitment_signed".to_owned()))?;
6016 htlc_signatures = res.1;
6018 log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
6019 encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
6020 &counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
6021 log_bytes!(signature.serialize_compact()[..]), &self.context.channel_id());
6023 for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
6024 log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
6025 encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, &self.context.channel_type, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
6026 encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &counterparty_keys)),
6027 log_bytes!(counterparty_keys.broadcaster_htlc_key.to_public_key().serialize()),
6028 log_bytes!(htlc_sig.serialize_compact()[..]), &self.context.channel_id());
6029 }
6030 }
6032 Ok((msgs::CommitmentSigned {
6033 channel_id: self.context.channel_id,
6034 signature,
6035 htlc_signatures,
6036 #[cfg(taproot)]
6037 partial_signature_with_nonce: None,
6038 }, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
6039 },
6040 // TODO (taproot|arik)
6041 #[cfg(taproot)]
6042 _ => todo!()
6043 }
6044 }
6046 /// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
6047 /// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
6048 ///
6049 /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
6050 /// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
6051 pub fn send_htlc_and_commit<F: Deref, L: Deref>(
6052 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32,
6053 source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
6054 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
6055 ) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
6056 where F::Target: FeeEstimator, L::Target: Logger
6057 {
6058 let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
6059 onion_routing_packet, false, skimmed_fee_msat, None, fee_estimator, logger);
6060 if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
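// On failure, `send_htlc` only ever returns `ChannelError::Ignore` (enforced by the
// debug_assert above), which leaves our channel state untouched, so the error can be
// bubbled up to the caller without closing the channel.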
6061 match send_res? {
6062 Some(_) => {
6063 let monitor_update = self.build_commitment_no_status_check(logger);
6064 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
6065 Ok(self.push_ret_blockable_mon_update(monitor_update))
6066 },
6067 None => Ok(None)
6068 }
6069 }
6071 /// Applies the `ChannelUpdate` and returns a boolean indicating whether a change actually
6072 /// happened.
6073 pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<bool, ChannelError> {
6074 let new_forwarding_info = Some(CounterpartyForwardingInfo {
6075 fee_base_msat: msg.contents.fee_base_msat,
6076 fee_proportional_millionths: msg.contents.fee_proportional_millionths,
6077 cltv_expiry_delta: msg.contents.cltv_expiry_delta
6078 });
6079 let did_change = self.context.counterparty_forwarding_info != new_forwarding_info;
6080 if did_change {
6081 self.context.counterparty_forwarding_info = new_forwarding_info;
6082 }
6083 Ok(did_change)
6084 }
6087 /// Begins the shutdown process, getting a message for the remote peer and returning all
6088 /// holding cell HTLCs for payment failure.
6089 pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures,
6090 target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
6091 -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
6092 {
6093 for htlc in self.context.pending_outbound_htlcs.iter() {
6094 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
6095 return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
6096 }
6097 }
6098 if self.context.channel_state.is_local_shutdown_sent() {
6099 return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
6100 }
6101 else if self.context.channel_state.is_remote_shutdown_sent() {
6102 return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
6103 }
6104 if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
6105 return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
6106 }
6107 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
6108 if self.context.channel_state.is_peer_disconnected() || self.context.channel_state.is_monitor_update_in_progress() {
6109 return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
6110 }
6112 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
6113 Some(_) => false,
6114 None => {
6115 // use override shutdown script if provided
6116 let shutdown_scriptpubkey = match override_shutdown_script {
6117 Some(script) => script,
6118 None => {
6119 // otherwise, use the shutdown scriptpubkey provided by the signer
6120 match signer_provider.get_shutdown_scriptpubkey() {
6121 Ok(scriptpubkey) => scriptpubkey,
6122 Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
6123 }
6124 }
6125 };
6126 if !shutdown_scriptpubkey.is_compatible(their_features) {
6127 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
6128 }
6129 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
6130 true
6131 },
6132 };
6134 // From here on out, we may not fail!
6135 self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
6136 self.context.channel_state.set_local_shutdown_sent();
6137 self.context.update_time_counter += 1;
6139 let monitor_update = if update_shutdown_script {
6140 self.context.latest_monitor_update_id += 1;
6141 let monitor_update = ChannelMonitorUpdate {
6142 update_id: self.context.latest_monitor_update_id,
6143 counterparty_node_id: Some(self.context.counterparty_node_id),
6144 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
6145 scriptpubkey: self.get_closing_scriptpubkey(),
6146 }],
6147 };
6148 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
6149 self.push_ret_blockable_mon_update(monitor_update)
6150 } else { None };
6151 let shutdown = msgs::Shutdown {
6152 channel_id: self.context.channel_id,
6153 scriptpubkey: self.get_closing_scriptpubkey(),
6154 };
6156 // Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
6157 // our shutdown until we've committed all of the pending changes.
6158 self.context.holding_cell_update_fee = None;
6159 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
6160 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
6161 match htlc_update {
6162 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
6163 dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
6164 false
6165 },
6166 _ => true
6167 }
6168 });
6170 debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
6171 "we can't both complete shutdown and return a monitor update");
6173 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
6174 }
6176 pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
6177 self.context.holding_cell_htlc_updates.iter()
6178 .flat_map(|htlc_update| {
6179 match htlc_update {
6180 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
6181 => Some((source, payment_hash)),
6182 _ => None,
6183 }
6184 })
6185 .chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
6186 }
6187 }
6189 /// A not-yet-funded outbound (from holder) channel using V1 channel establishment.
6190 pub(super) struct OutboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
6191 pub context: ChannelContext<SP>,
6192 pub unfunded_context: UnfundedChannelContext,
6193 }
6195 impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
6196 pub fn new<ES: Deref, F: Deref>(
6197 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
6198 channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
6199 outbound_scid_alias: u64, temporary_channel_id: Option<ChannelId>
6200 ) -> Result<OutboundV1Channel<SP>, APIError>
6201 where ES::Target: EntropySource,
6202 F::Target: FeeEstimator
6203 {
6204 let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
6205 let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
6206 let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
6207 let pubkeys = holder_signer.pubkeys().clone();
6209 if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
6210 return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
6211 }
6212 if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
6213 return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
6214 }
6215 let channel_value_msat = channel_value_satoshis * 1000;
6216 if push_msat > channel_value_msat {
6217 return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
6218 }
6219 if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
6220 return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk", holder_selected_contest_delay)});
6221 }
6222 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
6223 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6224 // Protocol-level safety check; this should never happen because of
6225 // `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
6226 return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below implementation limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
6227 }
6229 let channel_type = Self::get_initial_channel_type(&config, their_features);
6230 debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
6232 let (commitment_conf_target, anchor_outputs_value_msat) = if channel_type.supports_anchors_zero_fee_htlc_tx() {
6233 (ConfirmationTarget::AnchorChannelFee, ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000)
6234 } else {
6235 (ConfirmationTarget::NonAnchorChannelFee, 0)
6236 };
6237 let commitment_feerate = fee_estimator.bounded_sat_per_1000_weight(commitment_conf_target);
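// Sanity-check that the funder (us) can actually pay for the initial commitment
// transaction (plus any anchor output value) while still affording
// MIN_AFFORDABLE_HTLC_COUNT HTLCs at the current feerate; otherwise the channel would be
// unusable from the start.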
6239 let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
6240 let commitment_tx_fee = commit_tx_fee_msat(commitment_feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
6241 if value_to_self_msat.saturating_sub(anchor_outputs_value_msat) < commitment_tx_fee {
6242 return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
6243 }
6245 let mut secp_ctx = Secp256k1::new();
6246 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
6248 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
6249 match signer_provider.get_shutdown_scriptpubkey() {
6250 Ok(scriptpubkey) => Some(scriptpubkey),
6251 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
6252 }
6253 } else { None };
6255 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
6256 if !shutdown_scriptpubkey.is_compatible(&their_features) {
6257 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
6258 }
6259 }
6261 let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
6262 Ok(script) => script,
6263 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
6264 };
6266 let temporary_channel_id = temporary_channel_id.unwrap_or_else(|| ChannelId::temporary_from_entropy_source(entropy_source));
6268 Ok(Self {
6269 context: ChannelContext {
6270 user_id,
6272 config: LegacyChannelConfig {
6273 options: config.channel_config.clone(),
6274 announced_channel: config.channel_handshake_config.announced_channel,
6275 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
6276 },
6278 prev_config: None,
6280 inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),
6282 channel_id: temporary_channel_id,
6283 temporary_channel_id: Some(temporary_channel_id),
6284 channel_state: ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT),
6285 announcement_sigs_state: AnnouncementSigsState::NotSent,
6286 secp_ctx,
6287 channel_value_satoshis,
6289 latest_monitor_update_id: 0,
6291 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
6292 shutdown_scriptpubkey,
6293 destination_script,
6295 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6296 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6297 value_to_self_msat,
6299 pending_inbound_htlcs: Vec::new(),
6300 pending_outbound_htlcs: Vec::new(),
6301 holding_cell_htlc_updates: Vec::new(),
6302 pending_update_fee: None,
6303 holding_cell_update_fee: None,
6304 next_holder_htlc_id: 0,
6305 next_counterparty_htlc_id: 0,
6306 update_time_counter: 1,
6308 resend_order: RAACommitmentOrder::CommitmentFirst,
6310 monitor_pending_channel_ready: false,
6311 monitor_pending_revoke_and_ack: false,
6312 monitor_pending_commitment_signed: false,
6313 monitor_pending_forwards: Vec::new(),
6314 monitor_pending_failures: Vec::new(),
6315 monitor_pending_finalized_fulfills: Vec::new(),
6317 signer_pending_commitment_update: false,
6318 signer_pending_funding: false,
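// Debug-build-only trackers of the largest to-self/to-counterparty output values we have
// computed for either side's commitment transaction, used to sanity-check later balance
// calculations.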
6320 #[cfg(debug_assertions)]
6321 holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
6322 #[cfg(debug_assertions)]
6323 counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
6325 last_sent_closing_fee: None,
6326 pending_counterparty_closing_signed: None,
6327 expecting_peer_commitment_signed: false,
6328 closing_fee_limits: None,
6329 target_closing_feerate_sats_per_kw: None,
6331 funding_tx_confirmed_in: None,
6332 funding_tx_confirmation_height: 0,
6333 short_channel_id: None,
6334 channel_creation_height: current_chain_height,
6336 feerate_per_kw: commitment_feerate,
6337 counterparty_dust_limit_satoshis: 0,
6338 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
6339 counterparty_max_htlc_value_in_flight_msat: 0,
6340 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
6341 counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel
6342 holder_selected_channel_reserve_satoshis,
6343 counterparty_htlc_minimum_msat: 0,
6344 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
6345 counterparty_max_accepted_htlcs: 0,
6346 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
6347 minimum_depth: None, // Filled in in accept_channel
6349 counterparty_forwarding_info: None,
6351 channel_transaction_parameters: ChannelTransactionParameters {
6352 holder_pubkeys: pubkeys,
6353 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
6354 is_outbound_from_holder: true,
6355 counterparty_parameters: None,
6356 funding_outpoint: None,
6357 channel_type_features: channel_type.clone()
6358 },
6359 funding_transaction: None,
6360 is_batch_funding: None,
6362 counterparty_cur_commitment_point: None,
6363 counterparty_prev_commitment_point: None,
6364 counterparty_node_id,
6366 counterparty_shutdown_scriptpubkey: None,
6368 commitment_secrets: CounterpartyCommitmentSecrets::new(),
6370 channel_update_status: ChannelUpdateStatus::Enabled,
6371 closing_signed_in_flight: false,
6373 announcement_sigs: None,
6375 #[cfg(any(test, fuzzing))]
6376 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
6377 #[cfg(any(test, fuzzing))]
6378 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
6380 workaround_lnd_bug_4006: None,
6381 sent_message_awaiting_response: None,
6383 latest_inbound_scid_alias: None,
6384 outbound_scid_alias,
6386 channel_pending_event_emitted: false,
6387 channel_ready_event_emitted: false,
6389 #[cfg(any(test, fuzzing))]
6390 historical_inbound_htlc_fulfills: HashSet::new(),
6392 channel_type,
6393 channel_keys_id,
6395 blocked_monitor_updates: Vec::new(),
6396 },
6397 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
6398 })
6399 }
6401 /// Only allowed after [`ChannelContext::channel_transaction_parameters`] is set.
6402 fn get_funding_created_msg<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
6403 let counterparty_keys = self.context.build_remote_transaction_keys();
6404 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
6405 let signature = match &self.context.holder_signer {
6406 // TODO (taproot|arik): move match into calling method for Taproot
6407 ChannelSignerType::Ecdsa(ecdsa) => {
6408 ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.context.secp_ctx)
6409 .map(|(sig, _)| sig).ok()?
6410 },
6411 // TODO (taproot|arik)
6412 #[cfg(taproot)]
6413 _ => todo!()
6414 };
6416 if self.context.signer_pending_funding {
6417 log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding");
6418 self.context.signer_pending_funding = false;
6419 }
6421 Some(msgs::FundingCreated {
6422 temporary_channel_id: self.context.temporary_channel_id.unwrap(),
6423 funding_txid: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().txid,
6424 funding_output_index: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().index,
6425 signature,
6426 #[cfg(taproot)]
6427 partial_signature_with_nonce: None,
6428 #[cfg(taproot)]
6429 next_local_nonce: None,
6430 })
6431 }
6433 /// Updates channel state with knowledge of the funding transaction's txid/index, and generates
6434 /// a funding_created message for the remote peer.
6435 /// Panics if called at some time other than immediately after initial handshake, if called twice,
6436 /// or if called on an inbound channel.
6437 /// Note that channel_id changes during this call!
6438 /// Do NOT broadcast the funding transaction until after a successful funding_signed call!
6439 /// If an Err is returned, it is a ChannelError::Close.
6440 pub fn get_funding_created<L: Deref>(&mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
6441 -> Result<Option<msgs::FundingCreated>, (Self, ChannelError)> where L::Target: Logger {
6442 if !self.context.is_outbound() {
6443 panic!("Tried to create outbound funding_created message on an inbound channel!");
6444 }
6445 if !matches!(
6446 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
6447 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
6448 ) {
6449 panic!("Tried to get a funding_created message at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
6450 }
6451 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
6452 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
6453 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6454 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
6455 }
6457 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
6458 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
6460 // Now that we're past error-generating stuff, update our local state:
6462 self.context.channel_state = ChannelState::FundingNegotiated;
6463 self.context.channel_id = funding_txo.to_channel_id();
6465 // If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
6466 // We can skip this if it is a zero-conf channel.
6467 if funding_transaction.is_coin_base() &&
6468 self.context.minimum_depth.unwrap_or(0) > 0 &&
6469 self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
6470 self.context.minimum_depth = Some(COINBASE_MATURITY);
6471 }
6473 self.context.funding_transaction = Some(funding_transaction);
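// `is_batch_funding` is presumably kept as an `Option<()>` (rather than a bool) so it
// serializes as an optional field; we store the marker only when this channel is part of
// a batch funding transaction.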
6474 self.context.is_batch_funding = Some(()).filter(|_| is_batch_funding);
6476 let funding_created = self.get_funding_created_msg(logger);
6477 if funding_created.is_none() {
6478 #[cfg(not(async_signing))] {
6479 panic!("Failed to get signature for new funding creation");
6480 }
6481 #[cfg(async_signing)] {
6482 if !self.context.signer_pending_funding {
6483 log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
6484 self.context.signer_pending_funding = true;
6485 }
6486 }
6487 }
6489 Ok(funding_created)
6490 }
6492 fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
6493 // The default channel type (ie the first one we try) depends on whether the channel is
6494 // public - if it is, we just go with `only_static_remotekey` as it's the only option
6495 // available. If it's private, we first try `scid_privacy` as it provides better privacy
6496 // with no other changes, and fall back to `only_static_remotekey`.
6497 let mut ret = ChannelTypeFeatures::only_static_remote_key();
6498 if !config.channel_handshake_config.announced_channel &&
6499 config.channel_handshake_config.negotiate_scid_privacy &&
6500 their_features.supports_scid_privacy() {
6501 ret.set_scid_privacy_required();
6502 }
6504 // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
6505 // set it now. If they don't understand it, we'll fall back to our default of
6506 // `only_static_remotekey`.
6507 if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
6508 their_features.supports_anchors_zero_fee_htlc_tx() {
6509 ret.set_anchors_zero_fee_htlc_tx_required();
6510 }
6512 ret
6513 }
6515 /// If we receive an error message, it may only be a rejection of the channel type we tried,
6516 /// not of our ability to open any channel at all. Thus, on error, we should first call this
6517 /// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
6518 pub(crate) fn maybe_handle_error_without_close<F: Deref>(
6519 &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F>
6520 ) -> Result<msgs::OpenChannel, ()>
6521 where
6522 F::Target: FeeEstimator
6523 {
6524 if !self.context.is_outbound() ||
6525 !matches!(
6526 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
6527 if flags == NegotiatingFundingFlags::OUR_INIT_SENT
6528 )
6529 {
6530 return Err(());
6531 }
6532 if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() {
6533 // We've exhausted our options
6534 return Err(());
6535 }
6536 // We support opening a few different types of channels. Try removing our additional
6537 // features one by one until we've either arrived at our default or the counterparty has
6538 // accepted something.
6540 // Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
6541 // counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
6542 // checks whether the counterparty supports every feature, this would only happen if the
6543 // counterparty is advertising the feature, but rejecting channels proposing the feature for
6544 // reasons other than the channel type.
6545 if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
6546 self.context.channel_type.clear_anchors_zero_fee_htlc_tx();
6547 self.context.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
6548 assert!(!self.context.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
6549 } else if self.context.channel_type.supports_scid_privacy() {
6550 self.context.channel_type.clear_scid_privacy();
6551 } else {
6552 self.context.channel_type = ChannelTypeFeatures::only_static_remote_key();
6553 }
6554 self.context.channel_transaction_parameters.channel_type_features = self.context.channel_type.clone();
6555 Ok(self.get_open_channel(chain_hash))
6556 }
6558 pub fn get_open_channel(&self, chain_hash: ChainHash) -> msgs::OpenChannel {
6559 if !self.context.is_outbound() {
6560 panic!("Tried to open a channel for an inbound channel?");
6561 }
6562 if self.context.have_received_message() {
6563 panic!("Cannot generate an open_channel after we've moved forward");
6564 }
6566 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6567 panic!("Tried to send an open_channel for a channel that has already advanced");
6568 }
6570 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
6571 let keys = self.context.get_holder_pubkeys();
6573 msgs::OpenChannel {
6574 chain_hash,
6575 temporary_channel_id: self.context.channel_id,
6576 funding_satoshis: self.context.channel_value_satoshis,
6577 push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat,
6578 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
6579 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
6580 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
6581 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
6582 feerate_per_kw: self.context.feerate_per_kw as u32,
6583 to_self_delay: self.context.get_holder_selected_contest_delay(),
6584 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
6585 funding_pubkey: keys.funding_pubkey,
6586 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
6587 payment_point: keys.payment_point,
6588 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
6589 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
6590 first_per_commitment_point,
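// Per BOLT 2, the low bit of channel_flags is `announce_channel`; no other flag bits are
// set here.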
6591 channel_flags: if self.context.config.announced_channel {1} else {0},
6592 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
6593 Some(script) => script.clone().into_inner(),
6594 None => Builder::new().into_script(),
6595 }),
6596 channel_type: Some(self.context.channel_type.clone()),
6597 }
6598 }
6600 // Message handlers
6601 pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
6602 let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };
6604 // Check sanity of message fields:
6605 if !self.context.is_outbound() {
6606 return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
6607 }
6608 if !matches!(self.context.channel_state, ChannelState::NegotiatingFunding(flags) if flags == NegotiatingFundingFlags::OUR_INIT_SENT) {
6609 return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
6610 }
6611 if msg.dust_limit_satoshis > 21000000 * 100000000 {
6612 return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.dust_limit_satoshis)));
6613 }
6614 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
6615 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
6616 }
6617 if msg.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
6618 return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
6619 }
6620 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
6621 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
6622 msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
6623 }
6624 let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
6625 if msg.htlc_minimum_msat >= full_channel_value_msat {
6626 return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
6627 }
6628 let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
6629 if msg.to_self_delay > max_delay_acceptable {
6630 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.to_self_delay)));
6631 }
6632 if msg.max_accepted_htlcs < 1 {
6633 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
6634 }
6635 if msg.max_accepted_htlcs > MAX_HTLCS {
6636 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
6637 }
6639 // Now check against optional parameters as set by config...
6640 if msg.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
6641 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
6642 }
6643 if msg.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
6644 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
6645 }
6646 if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
6647 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
6648 }
6649 if msg.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
6650 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
6651 }
6652 if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6653 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6654 }
6655 if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
6656 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
6657 }
6658 if msg.minimum_depth > peer_limits.max_minimum_depth {
6659 return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.minimum_depth)));
6660 }
6662 if let Some(ty) = &msg.channel_type {
6663 if *ty != self.context.channel_type {
6664 return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
6665 }
6666 } else if their_features.supports_channel_type() {
6667 // Assume they've accepted the channel type as they said they understand it.
6668 } else {
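// The peer doesn't support explicit channel type negotiation, so fall back to the
// implicit channel type derived from their advertised init features.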
6669 let channel_type = ChannelTypeFeatures::from_init(&their_features);
6670 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
6671 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
6672 }
6673 self.context.channel_type = channel_type.clone();
6674 self.context.channel_transaction_parameters.channel_type_features = channel_type;
6675 }
6677 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
6678 match &msg.shutdown_scriptpubkey {
6679 &Some(ref script) => {
6680 // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
6681 if script.len() == 0 {
6682 None
6683 } else {
6684 if !script::is_bolt2_compliant(&script, their_features) {
6685 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
6686 }
6687 Some(script.clone())
6688 }
6689 },
6690 // Peer is signaling upfront shutdown but didn't opt out with the correct mechanism (a 0-length script). Peer looks buggy, so we fail the channel.
6691 &None => {
6692 return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
6693 }
6694 }
6695 } else { None };
6697 self.context.counterparty_dust_limit_satoshis = msg.dust_limit_satoshis;
6698 self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
6699 self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
6700 self.context.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat;
6701 self.context.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs;
6703 if peer_limits.trust_own_funding_0conf {
6704 self.context.minimum_depth = Some(msg.minimum_depth);
6705 } else {
6706 self.context.minimum_depth = Some(cmp::max(1, msg.minimum_depth));
6707 }
6709 let counterparty_pubkeys = ChannelPublicKeys {
6710 funding_pubkey: msg.funding_pubkey,
6711 revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
6712 payment_point: msg.payment_point,
6713 delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
6714 htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
6715 };
6717 self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
6718 selected_contest_delay: msg.to_self_delay,
6719 pubkeys: counterparty_pubkeys,
6720 });
6722 self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point);
6723 self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
6725 self.context.channel_state = ChannelState::NegotiatingFunding(
6726 NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
6727 );
6728 self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
6730 Ok(())
6731 }
6733 /// Handles a funding_signed message from the remote end.
6734 /// If this call is successful, broadcast the funding transaction (and not before!)
6735 pub fn funding_signed<L: Deref>(
6736 mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
6737 ) -> Result<(Channel<SP>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (OutboundV1Channel<SP>, ChannelError)>
6738 where
6739 L::Target: Logger
6740 {
6741 if !self.context.is_outbound() {
6742 return Err((self, ChannelError::Close("Received funding_signed for an inbound channel?".to_owned())));
6743 }
6744 if !matches!(self.context.channel_state, ChannelState::FundingNegotiated) {
6745 return Err((self, ChannelError::Close("Received funding_signed in strange state!".to_owned())));
6746 }
6747 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
6748 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
6749 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6750 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
6751 }
6753 let funding_script = self.context.get_funding_redeemscript();
6755 let counterparty_keys = self.context.build_remote_transaction_keys();
6756 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
6757 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
6758 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
6760 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
6761 &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
6763 let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
6764 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
6766 let trusted_tx = initial_commitment_tx.trust();
6767 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
6768 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
6769 // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
6770 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
6771 return Err((self, ChannelError::Close("Invalid funding_signed signature from peer".to_owned())));
6772 }
6775 let holder_commitment_tx = HolderCommitmentTransaction::new(
6776 initial_commitment_tx,
6777 msg.signature,
6778 Vec::new(),
6779 &self.context.get_holder_pubkeys().funding_pubkey,
6780 self.context.counterparty_funding_pubkey()
6781 );
6783 let validated =
6784 self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new());
6785 if validated.is_err() {
6786 return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
6787 }
6789 let funding_redeemscript = self.context.get_funding_redeemscript();
6790 let funding_txo = self.context.get_funding_txo().unwrap();
6791 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
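// Per BOLT 3, commitment numbers are obscured by a factor derived from both parties'
// payment points, so on-chain observers can't tell how many updates the channel has seen.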
6792 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
6793 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
6794 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
6795 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
6796 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
6797 shutdown_script, self.context.get_holder_selected_contest_delay(),
6798 &self.context.destination_script, (funding_txo, funding_txo_script),
6799 &self.context.channel_transaction_parameters,
6800 funding_redeemscript.clone(), self.context.channel_value_satoshis,
6801 obscure_factor,
6802 holder_commitment_tx, best_block, self.context.counterparty_node_id);
6803 channel_monitor.provide_initial_counterparty_commitment_tx(
6804 counterparty_initial_bitcoin_tx.txid, Vec::new(),
6805 self.context.cur_counterparty_commitment_transaction_number,
6806 self.context.counterparty_cur_commitment_point.unwrap(),
6807 counterparty_initial_commitment_tx.feerate_per_kw(),
6808 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
6809 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
6811 assert!(!self.context.channel_state.is_monitor_update_in_progress()); // We have not had any monitors yet, so there are none whose update could fail!
6812 if self.context.is_batch_funding() {
6813 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH);
6814 } else {
6815 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
6816 }
6817 self.context.cur_holder_commitment_transaction_number -= 1;
6818 self.context.cur_counterparty_commitment_transaction_number -= 1;
6820 log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
6822 let mut channel = Channel { context: self.context };
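// Called with a height of 0 here, `check_get_channel_ready` should only return `Some`
// for 0-conf channels (a minimum depth of zero), in which case we can send
// `channel_ready` immediately.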
6824 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
6825 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
6826 Ok((channel, channel_monitor))
6827 }
6829 /// Indicates that the signer may have some signatures for us, so we should retry if we're
6830 /// blocked.
6831 #[cfg(async_signing)]
6832 pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
6833 if self.context.signer_pending_funding && self.context.is_outbound() {
6834 log_trace!(logger, "Signer unblocked a funding_created");
6835 self.get_funding_created_msg(logger)
6836 } else { None }
6837 }
6838 }
6840 /// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
6841 pub(super) struct InboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
6842 pub context: ChannelContext<SP>,
6843 pub unfunded_context: UnfundedChannelContext,
6844 }
6846 /// Fetches the [`ChannelTypeFeatures`] that will be used for a channel built from a given
6847 /// [`msgs::OpenChannel`].
6848 pub(super) fn channel_type_from_open_channel(
6849 msg: &msgs::OpenChannel, their_features: &InitFeatures,
6850 our_supported_features: &ChannelTypeFeatures
6851 ) -> Result<ChannelTypeFeatures, ChannelError> {
6852 if let Some(channel_type) = &msg.channel_type {
6853 if channel_type.supports_any_optional_bits() {
6854 return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
6855 }
6857 // We only support the channel types defined by the `ChannelManager` in
6858 // `provided_channel_type_features`. The channel type must always support
6859 // `static_remote_key`.
6860 if !channel_type.requires_static_remote_key() {
6861 return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
6862 }
6863 // Make sure we support all of the features behind the channel type.
6864 if !channel_type.is_subset(our_supported_features) {
6865 return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
6866 }
6867 let announced_channel = (msg.channel_flags & 1) == 1;
6868 if channel_type.requires_scid_privacy() && announced_channel {
6869 return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
6870 }
6871 Ok(channel_type.clone())
6872 } else {
6873 let channel_type = ChannelTypeFeatures::from_init(&their_features);
6874 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
6875 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
6876 }
6877 Ok(channel_type)
6878 }
6879 }
6881 impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
6882 /// Creates a new channel from a remote side's request for one.
6883 /// Assumes chain_hash has already been checked and corresponds with what we expect!
6884 pub fn new<ES: Deref, F: Deref, L: Deref>(
6885 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
6886 counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
6887 their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
6888 current_chain_height: u32, logger: &L, is_0conf: bool,
6889 ) -> Result<InboundV1Channel<SP>, ChannelError>
6890 where ES::Target: EntropySource,
6891 F::Target: FeeEstimator,
6892 L::Target: Logger,
6893 {
6894 let logger = WithContext::from(logger, Some(counterparty_node_id), Some(msg.temporary_channel_id));
6895 let announced_channel = (msg.channel_flags & 1) == 1;
6897 // First check the channel type is known, failing before we do anything else if we don't
6898 // support this channel type.
6899 let channel_type = channel_type_from_open_channel(msg, their_features, our_supported_features)?;
6901 let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id);
6902 let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id);
6903 let pubkeys = holder_signer.pubkeys().clone();
6904 let counterparty_pubkeys = ChannelPublicKeys {
6905 funding_pubkey: msg.funding_pubkey,
6906 revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
6907 payment_point: msg.payment_point,
6908 delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
6909 htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
6910 };
6912 if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
6913 return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
6914 }
6916 // Check sanity of message fields:
6917 if msg.funding_satoshis > config.channel_handshake_limits.max_funding_satoshis {
6918 return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.channel_handshake_limits.max_funding_satoshis, msg.funding_satoshis)));
6919 }
6920 if msg.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
6921 return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.funding_satoshis)));
6922 }
6923 if msg.channel_reserve_satoshis > msg.funding_satoshis {
6924 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.funding_satoshis)));
6925 }
6926 let full_channel_value_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000;
6927 if msg.push_msat > full_channel_value_msat {
6928 return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg.push_msat, full_channel_value_msat)));
6929 }
6930 if msg.dust_limit_satoshis > msg.funding_satoshis {
6931 return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than funding_satoshis {}. Peer never wants payout outputs?", msg.dust_limit_satoshis, msg.funding_satoshis)));
6932 }
6933 if msg.htlc_minimum_msat >= full_channel_value_msat {
6934 return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
6935 }
6936 Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, msg.feerate_per_kw, None, &&logger)?;
6938 let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
6939 if msg.to_self_delay > max_counterparty_selected_contest_delay {
6940 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, msg.to_self_delay)));
6941 }
6942 if msg.max_accepted_htlcs < 1 {
6943 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
6944 }
6945 if msg.max_accepted_htlcs > MAX_HTLCS {
6946 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
6947 }
6949 // Now check against optional parameters as set by config...
6950 if msg.funding_satoshis < config.channel_handshake_limits.min_funding_satoshis {
6951 return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", msg.funding_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
6952 }
6953 if msg.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
6954 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
6955 }
6956 if msg.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
6957 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
6958 }
6959 if msg.channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
6960 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
6961 }
6962 if msg.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
6963 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
6964 }
6965 if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6966 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6967 }
6968 if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
6969 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
6970 }
6972 // Convert things into internal flags and prep our state:
6974 if config.channel_handshake_limits.force_announced_channel_preference {
6975 if config.channel_handshake_config.announced_channel != announced_channel {
6976 return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
6977 }
6978 }
6980 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis, config);
6981 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6982 // Protocol-level safety check; this should never happen because of
6983 // `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
6984 return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6985 }
6986 if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
6987 return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg.push_msat)));
6988 }
6989 if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6990 log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
6991 msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
6992 }
6993 if holder_selected_channel_reserve_satoshis < msg.dust_limit_satoshis {
6994 return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
6995 }
6997 // check if the funder's amount for the initial commitment tx is sufficient
6998 // for full fee payment plus a few HTLCs to ensure the channel will be useful.
6999 let anchor_outputs_value = if channel_type.supports_anchors_zero_fee_htlc_tx() {
7000 ANCHOR_OUTPUT_VALUE_SATOSHI * 2
7001 } else {
7002 0
7003 };
7004 let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat;
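// Note the units: `commit_tx_fee_msat` returns millisatoshis, so the division by 1000
// below yields satoshis, matching `anchor_outputs_value` above.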
7005 let commitment_tx_fee = commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000;
7006 if (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value) < commitment_tx_fee {
7007 return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee)));
7008 }
7010 let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee - anchor_outputs_value;
7011 // While it's reasonable for us to not meet the channel reserve initially (if they don't
7012 // want to push much to us), our counterparty should always have more than our reserve.
7013 if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
7014 return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
7015 }
7017 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
7018 match &msg.shutdown_scriptpubkey {
7019 &Some(ref script) => {
7020 // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
7021 if script.len() == 0 {
7022 None
7023 } else {
7024 if !script::is_bolt2_compliant(&script, their_features) {
7025 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
7026 }
7027 Some(script.clone())
7028 }
7029 },
7030 // Peer is signaling upfront shutdown but didn't opt out with the correct mechanism (a 0-length script). Peer looks buggy, so we fail the channel.
7031 &None => {
7032 return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
7033 }
7034 }
7035 } else { None };
7037 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
7038 match signer_provider.get_shutdown_scriptpubkey() {
7039 Ok(scriptpubkey) => Some(scriptpubkey),
7040 Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
7041 }
7042 } else { None };
7044 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
7045 if !shutdown_scriptpubkey.is_compatible(&their_features) {
7046 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
7047 }
7048 }
7050 let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
7051 Ok(script) => script,
7052 Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
7053 };
7055 let mut secp_ctx = Secp256k1::new();
7056 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
7058 let minimum_depth = if is_0conf {
7059 Some(0)
7060 } else {
7061 Some(cmp::max(config.channel_handshake_config.minimum_depth, 1))
7062 };
7064 let chan = Self {
7065 context: ChannelContext {
7066 user_id,
7068 config: LegacyChannelConfig {
7069 options: config.channel_config.clone(),
7070 announced_channel,
7071 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
7072 },
7074 prev_config: None,
7076 inbound_handshake_limits_override: None,
7078 temporary_channel_id: Some(msg.temporary_channel_id),
7079 channel_id: msg.temporary_channel_id,
7080 channel_state: ChannelState::NegotiatingFunding(
7081 NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
7082 ),
7083 announcement_sigs_state: AnnouncementSigsState::NotSent,
7084 secp_ctx,
7086 latest_monitor_update_id: 0,
7088 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
7089 shutdown_scriptpubkey,
7090 destination_script,
7092 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
7093 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
7094 value_to_self_msat: msg.push_msat,
7096 pending_inbound_htlcs: Vec::new(),
7097 pending_outbound_htlcs: Vec::new(),
7098 holding_cell_htlc_updates: Vec::new(),
7099 pending_update_fee: None,
7100 holding_cell_update_fee: None,
7101 next_holder_htlc_id: 0,
7102 next_counterparty_htlc_id: 0,
7103 update_time_counter: 1,
7105 resend_order: RAACommitmentOrder::CommitmentFirst,
7107 monitor_pending_channel_ready: false,
7108 monitor_pending_revoke_and_ack: false,
7109 monitor_pending_commitment_signed: false,
7110 monitor_pending_forwards: Vec::new(),
7111 monitor_pending_failures: Vec::new(),
7112 monitor_pending_finalized_fulfills: Vec::new(),
7114 signer_pending_commitment_update: false,
7115 signer_pending_funding: false,
7117 #[cfg(debug_assertions)]
7118 holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
7119 #[cfg(debug_assertions)]
7120 counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
7122 last_sent_closing_fee: None,
7123 pending_counterparty_closing_signed: None,
7124 expecting_peer_commitment_signed: false,
7125 closing_fee_limits: None,
7126 target_closing_feerate_sats_per_kw: None,
7128 funding_tx_confirmed_in: None,
7129 funding_tx_confirmation_height: 0,
7130 short_channel_id: None,
7131 channel_creation_height: current_chain_height,
7133 feerate_per_kw: msg.feerate_per_kw,
7134 channel_value_satoshis: msg.funding_satoshis,
7135 counterparty_dust_limit_satoshis: msg.dust_limit_satoshis,
7136 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
7137 counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000),
7138 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config),
7139 counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis),
7140 holder_selected_channel_reserve_satoshis,
7141 counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
7142 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
7143 counterparty_max_accepted_htlcs: msg.max_accepted_htlcs,
7144 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
7147 counterparty_forwarding_info: None,
7149 channel_transaction_parameters: ChannelTransactionParameters {
7150 holder_pubkeys: pubkeys,
7151 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
7152 is_outbound_from_holder: false,
7153 counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
7154 selected_contest_delay: msg.to_self_delay,
7155 pubkeys: counterparty_pubkeys,
7157 funding_outpoint: None,
7158 channel_type_features: channel_type.clone()
7160 funding_transaction: None,
7161 is_batch_funding: None,
7163 counterparty_cur_commitment_point: Some(msg.first_per_commitment_point),
7164 counterparty_prev_commitment_point: None,
7165 counterparty_node_id,
7167 counterparty_shutdown_scriptpubkey,
7169 commitment_secrets: CounterpartyCommitmentSecrets::new(),
7171 channel_update_status: ChannelUpdateStatus::Enabled,
7172 closing_signed_in_flight: false,
7174 announcement_sigs: None,
7176 #[cfg(any(test, fuzzing))]
7177 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
7178 #[cfg(any(test, fuzzing))]
7179 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
7181 workaround_lnd_bug_4006: None,
7182 sent_message_awaiting_response: None,
7184 latest_inbound_scid_alias: None,
7185 outbound_scid_alias: 0,
7187 channel_pending_event_emitted: false,
7188 channel_ready_event_emitted: false,
7190 #[cfg(any(test, fuzzing))]
7191 historical_inbound_htlc_fulfills: HashSet::new(),
7196 blocked_monitor_updates: Vec::new(),
7198 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
	/// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
	/// should be sent back to the counterparty node.
	///
	/// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
	pub fn accept_inbound_channel(&mut self) -> msgs::AcceptChannel {
		if self.context.is_outbound() {
			panic!("Tried to send accept_channel for an outbound channel?");
		}
		if !matches!(
			self.context.channel_state, ChannelState::NegotiatingFunding(flags)
			if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
		) {
			panic!("Tried to send accept_channel after channel had moved forward");
		}
		if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
			panic!("Tried to send an accept_channel for a channel that has already advanced");
		}

		self.generate_accept_channel_message()
	}
	/// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
	/// inbound channel. If the intention is to accept an inbound channel, use
	/// [`InboundV1Channel::accept_inbound_channel`] instead.
	///
	/// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
	fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
		let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
		let keys = self.context.get_holder_pubkeys();

		msgs::AcceptChannel {
			temporary_channel_id: self.context.channel_id,
			dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
			max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
			channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
			htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
			minimum_depth: self.context.minimum_depth.unwrap(),
			to_self_delay: self.context.get_holder_selected_contest_delay(),
			max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
			funding_pubkey: keys.funding_pubkey,
			revocation_basepoint: keys.revocation_basepoint.to_public_key(),
			payment_point: keys.payment_point,
			delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
			htlc_basepoint: keys.htlc_basepoint.to_public_key(),
			first_per_commitment_point,
			shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
				Some(script) => script.clone().into_inner(),
				None => Builder::new().into_script(),
			}),
			channel_type: Some(self.context.channel_type.clone()),
			#[cfg(taproot)]
			next_local_nonce: None,
		}
	}
	/// Enables the possibility for tests to extract a [`msgs::AcceptChannel`] message for an
	/// inbound channel without accepting it.
	///
	/// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
	#[cfg(test)]
	pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel {
		self.generate_accept_channel_message()
	}
	fn check_funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<CommitmentTransaction, ChannelError> where L::Target: Logger {
		let funding_script = self.context.get_funding_redeemscript();

		let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
		let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
		let trusted_tx = initial_commitment_tx.trust();
		let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
		let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
		// They sign the holder commitment transaction...
		log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
			log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
			encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
			encode::serialize_hex(&funding_script), &self.context.channel_id());
		secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());

		Ok(initial_commitment_tx)
	}
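	// For illustration only (a hedged sketch, not LDK API): `secp_check!` above wraps
	// the standard rust-secp256k1 verification pattern, roughly
	//
	//   secp_ctx.verify_ecdsa(&sighash_message, &their_sig, &their_funding_pubkey)
	//       .map_err(|_| ChannelError::Close(..))
	//
	// where the message being verified is the BIP143 sighash of the initial holder
	// commitment transaction spending the 2-of-2 funding redeemscript.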
	pub fn funding_created<L: Deref>(
		mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
	) -> Result<(Channel<SP>, Option<msgs::FundingSigned>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (Self, ChannelError)>
	where
		L::Target: Logger
	{
		if self.context.is_outbound() {
			return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned())));
		}
		if !matches!(
			self.context.channel_state, ChannelState::NegotiatingFunding(flags)
			if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
		) {
			// BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
			// remember the channel, so it's safe to just send an error_message here and drop the
			// channel.
			return Err((self, ChannelError::Close("Received funding_created after we got the channel!".to_owned())));
		}
		if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
				self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
				self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
			panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
		}

		let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
		self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
		// This is an externally observable change before we finish all our checks. In particular
		// check_funding_created_signature may fail.
		self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);

		let initial_commitment_tx = match self.check_funding_created_signature(&msg.signature, logger) {
			Ok(res) => res,
			Err(ChannelError::Close(e)) => {
				self.context.channel_transaction_parameters.funding_outpoint = None;
				return Err((self, ChannelError::Close(e)));
			},
			Err(e) => {
				// The only error we know how to handle is ChannelError::Close, so we fall over here
				// to make sure we don't continue with an inconsistent state.
				panic!("unexpected error type from check_funding_created_signature {:?}", e);
			}
		};

		let holder_commitment_tx = HolderCommitmentTransaction::new(
			initial_commitment_tx,
			msg.signature,
			Vec::new(),
			&self.context.get_holder_pubkeys().funding_pubkey,
			self.context.counterparty_funding_pubkey()
		);

		if let Err(_) = self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new()) {
			return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
		}

		// Now that we're past error-generating stuff, update our local state:

		self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
		self.context.channel_id = funding_txo.to_channel_id();
		self.context.cur_counterparty_commitment_transaction_number -= 1;
		self.context.cur_holder_commitment_transaction_number -= 1;

		let (counterparty_initial_commitment_tx, funding_signed) = self.context.get_funding_signed_msg(logger);

		let funding_redeemscript = self.context.get_funding_redeemscript();
		let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
		let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
		let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
		let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
		monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
		let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
			shutdown_script, self.context.get_holder_selected_contest_delay(),
			&self.context.destination_script, (funding_txo, funding_txo_script.clone()),
			&self.context.channel_transaction_parameters,
			funding_redeemscript.clone(), self.context.channel_value_satoshis,
			obscure_factor,
			holder_commitment_tx, best_block, self.context.counterparty_node_id);
		channel_monitor.provide_initial_counterparty_commitment_tx(
			counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
			self.context.cur_counterparty_commitment_transaction_number + 1,
			self.context.counterparty_cur_commitment_point.unwrap(), self.context.feerate_per_kw,
			counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
			counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);

		log_info!(logger, "{} funding_signed for peer for channel {}",
			if funding_signed.is_some() { "Generated" } else { "Waiting for signature on" }, &self.context.channel_id());

		// Promote the channel to a full-fledged one now that we have updated the state and have a
		// `ChannelMonitor`.
		let mut channel = Channel {
			context: self.context,
		};

		let need_channel_ready = channel.check_get_channel_ready(0).is_some();
		channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());

		Ok((channel, funding_signed, channel_monitor))
	}
}
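// Background, per BOLT 3 (a hedged sketch, not the LDK helper itself): the
// `obscure_factor` handed to the `ChannelMonitor` above is the lower 48 bits of
// SHA256(opener_payment_basepoint || acceptor_payment_basepoint). Each commitment
// transaction XORs its 48-bit commitment number with this factor across the
// locktime and input sequence fields, so on-chain observers cannot trivially
// count channel state updates. The helper name below is ours.
#[cfg(test)]
#[allow(dead_code)]
fn _obscure_factor_sketch(opener_payment_basepoint: &PublicKey, acceptor_payment_basepoint: &PublicKey) -> u64 {
	use bitcoin::hashes::HashEngine;
	let mut engine = Sha256::engine();
	engine.input(&opener_payment_basepoint.serialize());
	engine.input(&acceptor_payment_basepoint.serialize());
	let res = Sha256::from_engine(engine).to_byte_array();
	// Keep only the lowest 48 bits, i.e. the last six bytes of the hash.
	let mut factor = 0u64;
	for b in res[26..].iter() { factor = (factor << 8) | (*b as u64); }
	factor
}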
const SERIALIZATION_VERSION: u8 = 3;
const MIN_SERIALIZATION_VERSION: u8 = 3;

impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
	(0, FailRelay),
	(1, FailMalformed),
	(2, Fulfill),
);
impl Writeable for ChannelUpdateStatus {
	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
		// We only care about writing out the current state as it was announced, ie only either
		// Enabled or Disabled. In the case of DisabledStaged, we most recently announced the
		// channel as enabled, so we write 0. For EnabledStaged, we similarly write a 1.
		match self {
			ChannelUpdateStatus::Enabled => 0u8.write(writer)?,
			ChannelUpdateStatus::DisabledStaged(_) => 0u8.write(writer)?,
			ChannelUpdateStatus::EnabledStaged(_) => 1u8.write(writer)?,
			ChannelUpdateStatus::Disabled => 1u8.write(writer)?,
		}
		Ok(())
	}
}
impl Readable for ChannelUpdateStatus {
	fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
		Ok(match <u8 as Readable>::read(reader)? {
			0 => ChannelUpdateStatus::Enabled,
			1 => ChannelUpdateStatus::Disabled,
			_ => return Err(DecodeError::InvalidValue),
		})
	}
}
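// Hedged round-trip sketch (test-only; the helper name and the `DisabledStaged`
// payload value are ours): because staged states collapse to the last-announced
// value on write, `DisabledStaged` encodes identically to `Enabled` and reads
// back as `Enabled`.
#[cfg(test)]
#[allow(dead_code)]
fn _channel_update_status_collapses_on_write() {
	assert_eq!(ChannelUpdateStatus::DisabledStaged(0).encode(), ChannelUpdateStatus::Enabled.encode());
}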
impl Writeable for AnnouncementSigsState {
	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
		// We only care about writing out the current state as if we had just disconnected, at
		// which point we always set anything but `PeerReceived` to `NotSent`.
		match self {
			AnnouncementSigsState::NotSent => 0u8.write(writer),
			AnnouncementSigsState::MessageSent => 0u8.write(writer),
			AnnouncementSigsState::Committed => 0u8.write(writer),
			AnnouncementSigsState::PeerReceived => 1u8.write(writer),
		}
	}
}
impl Readable for AnnouncementSigsState {
	fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
		Ok(match <u8 as Readable>::read(reader)? {
			0 => AnnouncementSigsState::NotSent,
			1 => AnnouncementSigsState::PeerReceived,
			_ => return Err(DecodeError::InvalidValue),
		})
	}
}
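// Hedged companion sketch (test-only, our helper name): any state short of
// `PeerReceived` re-encodes as `NotSent`, matching the disconnect semantics above.
#[cfg(test)]
#[allow(dead_code)]
fn _announcement_sigs_state_collapses_on_write() {
	assert_eq!(AnnouncementSigsState::MessageSent.encode(), AnnouncementSigsState::NotSent.encode());
}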
impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
		// Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been
		// called.

		write_ver_prefix!(writer, MIN_SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);

		// `user_id` used to be a single u64 value. In order to remain backwards compatible with
		// versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write
		// the low bytes now and the optional high bytes later.
		let user_id_low = self.context.user_id as u64;
		user_id_low.write(writer)?;

		// Version 1 deserializers expected to read parts of the config object here. Version 2
		// deserializers (0.0.99) now read config through TLVs, and as we now require them for
		// `minimum_depth` we simply write dummy values here.
		writer.write_all(&[0; 8])?;

		self.context.channel_id.write(writer)?;
		let mut channel_state = self.context.channel_state;
		if matches!(channel_state, ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)) {
			channel_state.set_peer_disconnected();
		}
		channel_state.to_u32().write(writer)?;
		self.context.channel_value_satoshis.write(writer)?;

		self.context.latest_monitor_update_id.write(writer)?;

		// Write out the old serialization for shutdown_pubkey for backwards compatibility, if
		// deserialized from that format.
		match self.context.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) {
			Some(shutdown_pubkey) => shutdown_pubkey.write(writer)?,
			None => [0u8; PUBLIC_KEY_SIZE].write(writer)?,
		}
		self.context.destination_script.write(writer)?;

		self.context.cur_holder_commitment_transaction_number.write(writer)?;
		self.context.cur_counterparty_commitment_transaction_number.write(writer)?;
		self.context.value_to_self_msat.write(writer)?;

		let mut dropped_inbound_htlcs = 0;
		for htlc in self.context.pending_inbound_htlcs.iter() {
			if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
				dropped_inbound_htlcs += 1;
			}
		}
		(self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?;
		for htlc in self.context.pending_inbound_htlcs.iter() {
			if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state {
				continue; // Drop
			}
			htlc.htlc_id.write(writer)?;
			htlc.amount_msat.write(writer)?;
			htlc.cltv_expiry.write(writer)?;
			htlc.payment_hash.write(writer)?;
			match &htlc.state {
				&InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
				&InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_state) => {
					1u8.write(writer)?;
					htlc_state.write(writer)?;
				},
				&InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_state) => {
					2u8.write(writer)?;
					htlc_state.write(writer)?;
				},
				&InboundHTLCState::Committed => {
					3u8.write(writer)?;
				},
				&InboundHTLCState::LocalRemoved(ref removal_reason) => {
					4u8.write(writer)?;
					removal_reason.write(writer)?;
				},
			}
		}
		let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];
		let mut pending_outbound_skimmed_fees: Vec<Option<u64>> = Vec::new();
		let mut pending_outbound_blinding_points: Vec<Option<PublicKey>> = Vec::new();

		(self.context.pending_outbound_htlcs.len() as u64).write(writer)?;
		for htlc in self.context.pending_outbound_htlcs.iter() {
			htlc.htlc_id.write(writer)?;
			htlc.amount_msat.write(writer)?;
			htlc.cltv_expiry.write(writer)?;
			htlc.payment_hash.write(writer)?;
			htlc.source.write(writer)?;
			match &htlc.state {
				&OutboundHTLCState::LocalAnnounced(ref onion_packet) => {
					0u8.write(writer)?;
					onion_packet.write(writer)?;
				},
				&OutboundHTLCState::Committed => {
					1u8.write(writer)?;
				},
				&OutboundHTLCState::RemoteRemoved(_) => {
					// Treat this as a Committed because we haven't received the CS - they'll
					// re-send the claim/fail on reconnect, as well as (hopefully) the missing CS.
					1u8.write(writer)?;
				},
				&OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref outcome) => {
					3u8.write(writer)?;
					if let OutboundHTLCOutcome::Success(preimage) = outcome {
						preimages.push(preimage);
					}
					let reason: Option<&HTLCFailReason> = outcome.into();
					reason.write(writer)?;
				},
				&OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) => {
					4u8.write(writer)?;
					if let OutboundHTLCOutcome::Success(preimage) = outcome {
						preimages.push(preimage);
					}
					let reason: Option<&HTLCFailReason> = outcome.into();
					reason.write(writer)?;
				},
			}
			pending_outbound_skimmed_fees.push(htlc.skimmed_fee_msat);
			pending_outbound_blinding_points.push(htlc.blinding_point);
		}
		let mut holding_cell_skimmed_fees: Vec<Option<u64>> = Vec::new();
		let mut holding_cell_blinding_points: Vec<Option<PublicKey>> = Vec::new();
		// Vec of (htlc_id, failure_code, sha256_of_onion)
		let mut malformed_htlcs: Vec<(u64, u16, [u8; 32])> = Vec::new();
		(self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
		for update in self.context.holding_cell_htlc_updates.iter() {
			match update {
				&HTLCUpdateAwaitingACK::AddHTLC {
					ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
					blinding_point, skimmed_fee_msat,
				} => {
					0u8.write(writer)?;
					amount_msat.write(writer)?;
					cltv_expiry.write(writer)?;
					payment_hash.write(writer)?;
					source.write(writer)?;
					onion_routing_packet.write(writer)?;

					holding_cell_skimmed_fees.push(skimmed_fee_msat);
					holding_cell_blinding_points.push(blinding_point);
				},
				&HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
					1u8.write(writer)?;
					payment_preimage.write(writer)?;
					htlc_id.write(writer)?;
				},
				&HTLCUpdateAwaitingACK::FailHTLC { ref htlc_id, ref err_packet } => {
					2u8.write(writer)?;
					htlc_id.write(writer)?;
					err_packet.write(writer)?;
				},
				&HTLCUpdateAwaitingACK::FailMalformedHTLC {
					htlc_id, failure_code, sha256_of_onion
				} => {
					// We don't want to break downgrading by adding a new variant, so write a dummy
					// `::FailHTLC` variant and write the real malformed error as an optional TLV.
					malformed_htlcs.push((htlc_id, failure_code, sha256_of_onion));

					let dummy_err_packet = msgs::OnionErrorPacket { data: Vec::new() };
					2u8.write(writer)?;
					htlc_id.write(writer)?;
					dummy_err_packet.write(writer)?;
				},
			}
		}
		match self.context.resend_order {
			RAACommitmentOrder::CommitmentFirst => 0u8.write(writer)?,
			RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?,
		}

		self.context.monitor_pending_channel_ready.write(writer)?;
		self.context.monitor_pending_revoke_and_ack.write(writer)?;
		self.context.monitor_pending_commitment_signed.write(writer)?;

		(self.context.monitor_pending_forwards.len() as u64).write(writer)?;
		for &(ref pending_forward, ref htlc_id) in self.context.monitor_pending_forwards.iter() {
			pending_forward.write(writer)?;
			htlc_id.write(writer)?;
		}

		(self.context.monitor_pending_failures.len() as u64).write(writer)?;
		for &(ref htlc_source, ref payment_hash, ref fail_reason) in self.context.monitor_pending_failures.iter() {
			htlc_source.write(writer)?;
			payment_hash.write(writer)?;
			fail_reason.write(writer)?;
		}

		if self.context.is_outbound() {
			self.context.pending_update_fee.map(|(a, _)| a).write(writer)?;
		} else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.context.pending_update_fee {
			Some(feerate).write(writer)?;
		} else {
			// As for inbound HTLCs, if the update was only announced and never committed in a
			// commitment_signed, drop it.
			None::<u32>.write(writer)?;
		}
		self.context.holding_cell_update_fee.write(writer)?;

		self.context.next_holder_htlc_id.write(writer)?;
		(self.context.next_counterparty_htlc_id - dropped_inbound_htlcs).write(writer)?;
		self.context.update_time_counter.write(writer)?;
		self.context.feerate_per_kw.write(writer)?;

		// Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
		// however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
		// `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
		// consider the stale state on reload.
		0u8.write(writer)?;
		self.context.funding_tx_confirmed_in.write(writer)?;
		self.context.funding_tx_confirmation_height.write(writer)?;
		self.context.short_channel_id.write(writer)?;

		self.context.counterparty_dust_limit_satoshis.write(writer)?;
		self.context.holder_dust_limit_satoshis.write(writer)?;
		self.context.counterparty_max_htlc_value_in_flight_msat.write(writer)?;

		// Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
		self.context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0).write(writer)?;

		self.context.counterparty_htlc_minimum_msat.write(writer)?;
		self.context.holder_htlc_minimum_msat.write(writer)?;
		self.context.counterparty_max_accepted_htlcs.write(writer)?;

		// Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
		self.context.minimum_depth.unwrap_or(0).write(writer)?;

		match &self.context.counterparty_forwarding_info {
			Some(info) => {
				1u8.write(writer)?;
				info.fee_base_msat.write(writer)?;
				info.fee_proportional_millionths.write(writer)?;
				info.cltv_expiry_delta.write(writer)?;
			},
			None => 0u8.write(writer)?
		}

		self.context.channel_transaction_parameters.write(writer)?;
		self.context.funding_transaction.write(writer)?;

		self.context.counterparty_cur_commitment_point.write(writer)?;
		self.context.counterparty_prev_commitment_point.write(writer)?;
		self.context.counterparty_node_id.write(writer)?;

		self.context.counterparty_shutdown_scriptpubkey.write(writer)?;

		self.context.commitment_secrets.write(writer)?;

		self.context.channel_update_status.write(writer)?;

		#[cfg(any(test, fuzzing))]
		(self.context.historical_inbound_htlc_fulfills.len() as u64).write(writer)?;
		#[cfg(any(test, fuzzing))]
		for htlc in self.context.historical_inbound_htlc_fulfills.iter() {
			htlc.write(writer)?;
		}

		// If the channel type is something other than only-static-remote-key, then we need to have
		// older clients fail to deserialize this channel at all. If the type is
		// only-static-remote-key, we simply consider it "default" and don't write the channel type
		// out at all.
		let chan_type = if self.context.channel_type != ChannelTypeFeatures::only_static_remote_key() {
			Some(&self.context.channel_type) } else { None };

		// The same logic applies for `holder_selected_channel_reserve_satoshis` values other than
		// the default, and when `holder_max_htlc_value_in_flight_msat` is configured to be set to
		// a different percentage of the channel value than 10%, which older versions of LDK used
		// to set it to before the percentage was made configurable.
		let serialized_holder_selected_reserve =
			if self.context.holder_selected_channel_reserve_satoshis != get_legacy_default_holder_selected_channel_reserve_satoshis(self.context.channel_value_satoshis)
			{ Some(self.context.holder_selected_channel_reserve_satoshis) } else { None };

		let mut old_max_in_flight_percent_config = UserConfig::default().channel_handshake_config;
		old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = MAX_IN_FLIGHT_PERCENT_LEGACY;
		let serialized_holder_htlc_max_in_flight =
			if self.context.holder_max_htlc_value_in_flight_msat != get_holder_max_htlc_value_in_flight_msat(self.context.channel_value_satoshis, &old_max_in_flight_percent_config)
			{ Some(self.context.holder_max_htlc_value_in_flight_msat) } else { None };

		let channel_pending_event_emitted = Some(self.context.channel_pending_event_emitted);
		let channel_ready_event_emitted = Some(self.context.channel_ready_event_emitted);

		// `user_id` used to be a single u64 value. In order to remain backwards compatible with
		// versions prior to 0.0.113, the u128 is serialized as two separate u64 values. Therefore,
		// we write the high bytes as an option here.
		let user_id_high_opt = Some((self.context.user_id >> 64) as u64);

		let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.context.holder_max_accepted_htlcs) };
		write_tlv_fields!(writer, {
			(0, self.context.announcement_sigs, option),
			// minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
			// default value instead of being Option<>al. Thus, to maintain compatibility we write
			// them twice, once with their original default values above, and once as an option
			// here. On the read side, old versions will simply ignore the odd-type entries here,
			// and new versions map the default values to None and allow the TLV entries here to
			// override them.
			(1, self.context.minimum_depth, option),
			(2, chan_type, option),
			(3, self.context.counterparty_selected_channel_reserve_satoshis, option),
			(4, serialized_holder_selected_reserve, option),
			(5, self.context.config, required),
			(6, serialized_holder_htlc_max_in_flight, option),
			(7, self.context.shutdown_scriptpubkey, option),
			(8, self.context.blocked_monitor_updates, optional_vec),
			(9, self.context.target_closing_feerate_sats_per_kw, option),
			(11, self.context.monitor_pending_finalized_fulfills, required_vec),
			(13, self.context.channel_creation_height, required),
			(15, preimages, required_vec),
			(17, self.context.announcement_sigs_state, required),
			(19, self.context.latest_inbound_scid_alias, option),
			(21, self.context.outbound_scid_alias, required),
			(23, channel_ready_event_emitted, option),
			(25, user_id_high_opt, option),
			(27, self.context.channel_keys_id, required),
			(28, holder_max_accepted_htlcs, option),
			(29, self.context.temporary_channel_id, option),
			(31, channel_pending_event_emitted, option),
			(35, pending_outbound_skimmed_fees, optional_vec),
			(37, holding_cell_skimmed_fees, optional_vec),
			(38, self.context.is_batch_funding, option),
			(39, pending_outbound_blinding_points, optional_vec),
			(41, holding_cell_blinding_points, optional_vec),
			(43, malformed_htlcs, optional_vec), // Added in 0.0.119
		});

		Ok(())
	}
}
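// Hedged sketch (test-only, our helper name) of the `user_id` compatibility scheme
// noted above: the u128 is stored as the legacy low u64 plus an optional high-u64
// TLV (type 25), and the two halves are recombined on read.
#[cfg(test)]
#[allow(dead_code)]
fn _user_id_split_roundtrip() {
	let user_id: u128 = 0x0123_4567_89ab_cdef_0011_2233_4455_6677;
	let user_id_low = user_id as u64;          // written in the legacy fixed position
	let user_id_high = (user_id >> 64) as u64; // written as an odd TLV, ignored by old readers
	assert_eq!(user_id_low as u128 + ((user_id_high as u128) << 64), user_id);
}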
const MAX_ALLOC_SIZE: usize = 64*1024;
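// A hedged, generic sketch (our helper name, not LDK API) of how `MAX_ALLOC_SIZE`
// is applied in `read` below: never trust an on-disk length prefix; cap the
// up-front allocation at the constant and grow by bounded chunks while reading.
#[cfg(test)]
#[allow(dead_code)]
fn _read_length_prefixed_bounded<R: io::Read>(reader: &mut R, claimed_len: usize) -> Result<Vec<u8>, io::Error> {
	let mut out = Vec::with_capacity(cmp::min(claimed_len, MAX_ALLOC_SIZE));
	while out.len() != claimed_len {
		let mut buf = [0u8; 1024];
		let to_read = cmp::min(1024, claimed_len - out.len());
		reader.read_exact(&mut buf[..to_read])?;
		out.extend_from_slice(&buf[..to_read]);
	}
	Ok(out)
}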
impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel<SP>
	where
		ES::Target: EntropySource,
		SP::Target: SignerProvider
{
	fn read<R : io::Read>(reader: &mut R, args: (&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)) -> Result<Self, DecodeError> {
		let (entropy_source, signer_provider, serialized_height, our_supported_features) = args;
		let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
		// `user_id` used to be a single u64 value. In order to remain backwards compatible with
		// versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We read
		// the low bytes now and the high bytes later.
		let user_id_low: u64 = Readable::read(reader)?;

		let mut config = Some(LegacyChannelConfig::default());
		if ver == 1 {
			// Read the old serialization of the ChannelConfig from version 0.0.98.
			config.as_mut().unwrap().options.forwarding_fee_proportional_millionths = Readable::read(reader)?;
			config.as_mut().unwrap().options.cltv_expiry_delta = Readable::read(reader)?;
			config.as_mut().unwrap().announced_channel = Readable::read(reader)?;
			config.as_mut().unwrap().commit_upfront_shutdown_pubkey = Readable::read(reader)?;
		} else {
			// Read the 8 bytes of backwards-compatibility ChannelConfig data.
			let mut _val: u64 = Readable::read(reader)?;
		}

		let channel_id = Readable::read(reader)?;
		let channel_state = ChannelState::from_u32(Readable::read(reader)?).map_err(|_| DecodeError::InvalidValue)?;
		let channel_value_satoshis = Readable::read(reader)?;

		let latest_monitor_update_id = Readable::read(reader)?;
		let mut keys_data = None;
		if ver <= 2 {
			// Read the serialized signer bytes. We'll choose to deserialize them or not based on whether
			// the `channel_keys_id` TLV is present below.
			let keys_len: u32 = Readable::read(reader)?;
			keys_data = Some(Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE)));
			while keys_data.as_ref().unwrap().len() != keys_len as usize {
				// Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
				let mut data = [0; 1024];
				let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.as_ref().unwrap().len())];
				reader.read_exact(read_slice)?;
				keys_data.as_mut().unwrap().extend_from_slice(read_slice);
			}
		}
		// Read the old serialization for shutdown_pubkey, preferring the TLV field later if set.
		let mut shutdown_scriptpubkey = match <PublicKey as Readable>::read(reader) {
			Ok(pubkey) => Some(ShutdownScript::new_p2wpkh_from_pubkey(pubkey)),
			Err(_) => None,
		};
		let destination_script = Readable::read(reader)?;

		let cur_holder_commitment_transaction_number = Readable::read(reader)?;
		let cur_counterparty_commitment_transaction_number = Readable::read(reader)?;
		let value_to_self_msat = Readable::read(reader)?;

		let pending_inbound_htlc_count: u64 = Readable::read(reader)?;
		let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
		for _ in 0..pending_inbound_htlc_count {
			pending_inbound_htlcs.push(InboundHTLCOutput {
				htlc_id: Readable::read(reader)?,
				amount_msat: Readable::read(reader)?,
				cltv_expiry: Readable::read(reader)?,
				payment_hash: Readable::read(reader)?,
				state: match <u8 as Readable>::read(reader)? {
					1 => InboundHTLCState::AwaitingRemoteRevokeToAnnounce(Readable::read(reader)?),
					2 => InboundHTLCState::AwaitingAnnouncedRemoteRevoke(Readable::read(reader)?),
					3 => InboundHTLCState::Committed,
					4 => InboundHTLCState::LocalRemoved(Readable::read(reader)?),
					_ => return Err(DecodeError::InvalidValue),
				},
			});
		}
		let pending_outbound_htlc_count: u64 = Readable::read(reader)?;
		let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
		for _ in 0..pending_outbound_htlc_count {
			pending_outbound_htlcs.push(OutboundHTLCOutput {
				htlc_id: Readable::read(reader)?,
				amount_msat: Readable::read(reader)?,
				cltv_expiry: Readable::read(reader)?,
				payment_hash: Readable::read(reader)?,
				source: Readable::read(reader)?,
				state: match <u8 as Readable>::read(reader)? {
					0 => OutboundHTLCState::LocalAnnounced(Box::new(Readable::read(reader)?)),
					1 => OutboundHTLCState::Committed,
					2 => {
						let option: Option<HTLCFailReason> = Readable::read(reader)?;
						OutboundHTLCState::RemoteRemoved(option.into())
					},
					3 => {
						let option: Option<HTLCFailReason> = Readable::read(reader)?;
						OutboundHTLCState::AwaitingRemoteRevokeToRemove(option.into())
					},
					4 => {
						let option: Option<HTLCFailReason> = Readable::read(reader)?;
						OutboundHTLCState::AwaitingRemovedRemoteRevoke(option.into())
					},
					_ => return Err(DecodeError::InvalidValue),
				},
				skimmed_fee_msat: None,
				blinding_point: None,
			});
		}
		let holding_cell_htlc_update_count: u64 = Readable::read(reader)?;
		let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, DEFAULT_MAX_HTLCS as usize*2));
		for _ in 0..holding_cell_htlc_update_count {
			holding_cell_htlc_updates.push(match <u8 as Readable>::read(reader)? {
				0 => HTLCUpdateAwaitingACK::AddHTLC {
					amount_msat: Readable::read(reader)?,
					cltv_expiry: Readable::read(reader)?,
					payment_hash: Readable::read(reader)?,
					source: Readable::read(reader)?,
					onion_routing_packet: Readable::read(reader)?,
					skimmed_fee_msat: None,
					blinding_point: None,
				},
				1 => HTLCUpdateAwaitingACK::ClaimHTLC {
					payment_preimage: Readable::read(reader)?,
					htlc_id: Readable::read(reader)?,
				},
				2 => HTLCUpdateAwaitingACK::FailHTLC {
					htlc_id: Readable::read(reader)?,
					err_packet: Readable::read(reader)?,
				},
				_ => return Err(DecodeError::InvalidValue),
			});
		}
		let resend_order = match <u8 as Readable>::read(reader)? {
			0 => RAACommitmentOrder::CommitmentFirst,
			1 => RAACommitmentOrder::RevokeAndACKFirst,
			_ => return Err(DecodeError::InvalidValue),
		};

		let monitor_pending_channel_ready = Readable::read(reader)?;
		let monitor_pending_revoke_and_ack = Readable::read(reader)?;
		let monitor_pending_commitment_signed = Readable::read(reader)?;

		let monitor_pending_forwards_count: u64 = Readable::read(reader)?;
		let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, DEFAULT_MAX_HTLCS as usize));
		for _ in 0..monitor_pending_forwards_count {
			monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?));
		}

		let monitor_pending_failures_count: u64 = Readable::read(reader)?;
		let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, DEFAULT_MAX_HTLCS as usize));
		for _ in 0..monitor_pending_failures_count {
			monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?));
		}

		let pending_update_fee_value: Option<u32> = Readable::read(reader)?;

		let holding_cell_update_fee = Readable::read(reader)?;

		let next_holder_htlc_id = Readable::read(reader)?;
		let next_counterparty_htlc_id = Readable::read(reader)?;
		let update_time_counter = Readable::read(reader)?;
		let feerate_per_kw = Readable::read(reader)?;
		// Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
		// however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
		// `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
		// consider the stale state on reload.
		match <u8 as Readable>::read(reader)? {
			0 => {},
			1 => {
				let _: u32 = Readable::read(reader)?;
				let _: u64 = Readable::read(reader)?;
				let _: Signature = Readable::read(reader)?;
			},
			_ => return Err(DecodeError::InvalidValue),
		}
		let funding_tx_confirmed_in = Readable::read(reader)?;
		let funding_tx_confirmation_height = Readable::read(reader)?;
		let short_channel_id = Readable::read(reader)?;

		let counterparty_dust_limit_satoshis = Readable::read(reader)?;
		let holder_dust_limit_satoshis = Readable::read(reader)?;
		let counterparty_max_htlc_value_in_flight_msat = Readable::read(reader)?;
		let mut counterparty_selected_channel_reserve_satoshis = None;
		if ver == 1 {
			// Read the old serialization from version 0.0.98.
			counterparty_selected_channel_reserve_satoshis = Some(Readable::read(reader)?);
		} else {
			// Read the 8 bytes of backwards-compatibility data.
			let _dummy: u64 = Readable::read(reader)?;
		}
		let counterparty_htlc_minimum_msat = Readable::read(reader)?;
		let holder_htlc_minimum_msat = Readable::read(reader)?;
		let counterparty_max_accepted_htlcs = Readable::read(reader)?;

		let mut minimum_depth = None;
		if ver == 1 {
			// Read the old serialization from version 0.0.98.
			minimum_depth = Some(Readable::read(reader)?);
		} else {
			// Read the 4 bytes of backwards-compatibility data.
			let _dummy: u32 = Readable::read(reader)?;
		}
		let counterparty_forwarding_info = match <u8 as Readable>::read(reader)? {
			0 => None,
			1 => Some(CounterpartyForwardingInfo {
				fee_base_msat: Readable::read(reader)?,
				fee_proportional_millionths: Readable::read(reader)?,
				cltv_expiry_delta: Readable::read(reader)?,
			}),
			_ => return Err(DecodeError::InvalidValue),
		};

		let mut channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
		let funding_transaction: Option<Transaction> = Readable::read(reader)?;

		let counterparty_cur_commitment_point = Readable::read(reader)?;

		let counterparty_prev_commitment_point = Readable::read(reader)?;
		let counterparty_node_id = Readable::read(reader)?;

		let counterparty_shutdown_scriptpubkey = Readable::read(reader)?;
		let commitment_secrets = Readable::read(reader)?;

		let channel_update_status = Readable::read(reader)?;
		#[cfg(any(test, fuzzing))]
		let mut historical_inbound_htlc_fulfills = HashSet::new();
		#[cfg(any(test, fuzzing))]
		{
			let htlc_fulfills_len: u64 = Readable::read(reader)?;
			for _ in 0..htlc_fulfills_len {
				assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?));
			}
		}

		let pending_update_fee = if let Some(feerate) = pending_update_fee_value {
			Some((feerate, if channel_parameters.is_outbound_from_holder {
				FeeUpdateState::Outbound
			} else {
				FeeUpdateState::AwaitingRemoteRevokeToAnnounce
			}))
		} else {
			None
		};
		let mut announcement_sigs = None;
		let mut target_closing_feerate_sats_per_kw = None;
		let mut monitor_pending_finalized_fulfills = Some(Vec::new());
		let mut holder_selected_channel_reserve_satoshis = Some(get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis));
		let mut holder_max_htlc_value_in_flight_msat = Some(get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().channel_handshake_config));
		// Prior to supporting channel type negotiation, all of our channels were static_remotekey
		// only, so we default to that if none was written.
		let mut channel_type = Some(ChannelTypeFeatures::only_static_remote_key());
		let mut channel_creation_height = Some(serialized_height);
		let mut preimages_opt: Option<Vec<Option<PaymentPreimage>>> = None;

		// If we read an old Channel, for simplicity we just treat it as "we never sent an
		// AnnouncementSignatures" which implies we'll re-send it on reconnect, but that's fine.
		let mut announcement_sigs_state = Some(AnnouncementSigsState::NotSent);
		let mut latest_inbound_scid_alias = None;
		let mut outbound_scid_alias = None;
		let mut channel_pending_event_emitted = None;
		let mut channel_ready_event_emitted = None;

		let mut user_id_high_opt: Option<u64> = None;
		let mut channel_keys_id: Option<[u8; 32]> = None;
		let mut temporary_channel_id: Option<ChannelId> = None;
		let mut holder_max_accepted_htlcs: Option<u16> = None;

		let mut blocked_monitor_updates = Some(Vec::new());

		let mut pending_outbound_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
		let mut holding_cell_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;

		let mut is_batch_funding: Option<()> = None;

		let mut pending_outbound_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
		let mut holding_cell_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;

		let mut malformed_htlcs: Option<Vec<(u64, u16, [u8; 32])>> = None;
		read_tlv_fields!(reader, {
			(0, announcement_sigs, option),
			(1, minimum_depth, option),
			(2, channel_type, option),
			(3, counterparty_selected_channel_reserve_satoshis, option),
			(4, holder_selected_channel_reserve_satoshis, option),
			(5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
			(6, holder_max_htlc_value_in_flight_msat, option),
			(7, shutdown_scriptpubkey, option),
			(8, blocked_monitor_updates, optional_vec),
			(9, target_closing_feerate_sats_per_kw, option),
			(11, monitor_pending_finalized_fulfills, optional_vec),
			(13, channel_creation_height, option),
			(15, preimages_opt, optional_vec),
			(17, announcement_sigs_state, option),
			(19, latest_inbound_scid_alias, option),
			(21, outbound_scid_alias, option),
			(23, channel_ready_event_emitted, option),
			(25, user_id_high_opt, option),
			(27, channel_keys_id, option),
			(28, holder_max_accepted_htlcs, option),
			(29, temporary_channel_id, option),
			(31, channel_pending_event_emitted, option),
			(35, pending_outbound_skimmed_fees_opt, optional_vec),
			(37, holding_cell_skimmed_fees_opt, optional_vec),
			(38, is_batch_funding, option),
			(39, pending_outbound_blinding_points_opt, optional_vec),
			(41, holding_cell_blinding_points_opt, optional_vec),
			(43, malformed_htlcs, optional_vec), // Added in 0.0.119
		});
		let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
			let mut holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
			// If we've gotten to the funding stage of the channel, populate the signer with its
			// required channel parameters.
			if channel_state >= ChannelState::FundingNegotiated {
				holder_signer.provide_channel_parameters(&channel_parameters);
			}
			(channel_keys_id, holder_signer)
		} else {
			// `keys_data` can be `None` if we had corrupted data.
			let keys_data = keys_data.ok_or(DecodeError::InvalidValue)?;
			let holder_signer = signer_provider.read_chan_signer(&keys_data)?;
			(holder_signer.channel_keys_id(), holder_signer)
		};
		if let Some(preimages) = preimages_opt {
			let mut iter = preimages.into_iter();
			for htlc in pending_outbound_htlcs.iter_mut() {
				match htlc.state {
					OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(None)) => {
						htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
					}
					OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(None)) => {
						htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
					}
					_ => {}
				}
			}
			// We expect all preimages to be consumed above
			if iter.next().is_some() {
				return Err(DecodeError::InvalidValue);
			}
		}
		let chan_features = channel_type.as_ref().unwrap();
		if !chan_features.is_subset(our_supported_features) {
			// If the channel was written by a new version and negotiated with features we don't
			// understand yet, refuse to read it.
			return Err(DecodeError::UnknownRequiredFeature);
		}

		// ChannelTransactionParameters may have had an empty features set upon deserialization.
		// To account for that, we're proactively setting/overriding the field here.
		channel_parameters.channel_type_features = chan_features.clone();

		let mut secp_ctx = Secp256k1::new();
		secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());

		// `user_id` used to be a single u64 value. In order to remain backwards
		// compatible with versions prior to 0.0.113, the u128 is serialized as two
		// separate u64 values.
		let user_id = user_id_low as u128 + ((user_id_high_opt.unwrap_or(0) as u128) << 64);

		let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);
		if let Some(skimmed_fees) = pending_outbound_skimmed_fees_opt {
			let mut iter = skimmed_fees.into_iter();
			for htlc in pending_outbound_htlcs.iter_mut() {
				htlc.skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
			}
			// We expect all skimmed fees to be consumed above
			if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
		}
		if let Some(skimmed_fees) = holding_cell_skimmed_fees_opt {
			let mut iter = skimmed_fees.into_iter();
			for htlc in holding_cell_htlc_updates.iter_mut() {
				if let HTLCUpdateAwaitingACK::AddHTLC { ref mut skimmed_fee_msat, .. } = htlc {
					*skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
				}
			}
			// We expect all skimmed fees to be consumed above
			if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
		}
		if let Some(blinding_pts) = pending_outbound_blinding_points_opt {
			let mut iter = blinding_pts.into_iter();
			for htlc in pending_outbound_htlcs.iter_mut() {
				htlc.blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
			}
			// We expect all blinding points to be consumed above
			if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
		}
		if let Some(blinding_pts) = holding_cell_blinding_points_opt {
			let mut iter = blinding_pts.into_iter();
			for htlc in holding_cell_htlc_updates.iter_mut() {
				if let HTLCUpdateAwaitingACK::AddHTLC { ref mut blinding_point, .. } = htlc {
					*blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
				}
			}
			// We expect all blinding points to be consumed above
			if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
		}
		if let Some(malformed_htlcs) = malformed_htlcs {
			for (malformed_htlc_id, failure_code, sha256_of_onion) in malformed_htlcs {
				let htlc_idx = holding_cell_htlc_updates.iter().position(|htlc| {
					if let HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet } = htlc {
						let matches = *htlc_id == malformed_htlc_id;
						if matches { debug_assert!(err_packet.data.is_empty()) }
						matches
					} else { false }
				}).ok_or(DecodeError::InvalidValue)?;
				let malformed_htlc = HTLCUpdateAwaitingACK::FailMalformedHTLC {
					htlc_id: malformed_htlc_id, failure_code, sha256_of_onion
				};
				let _ = core::mem::replace(&mut holding_cell_htlc_updates[htlc_idx], malformed_htlc);
			}
		}
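		// Note: this pairing relies on the write side above having emitted a dummy
		// `FailHTLC` (with an empty error packet) for every `FailMalformedHTLC`, while
		// the real malformed error rode along in odd TLV type 43. Readers that do not
		// know type 43 simply keep the dummy `FailHTLC`, which preserves downgrade
		// compatibility.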
		Ok(Channel {
			context: ChannelContext {
				user_id,

				config: config.unwrap(),

				prev_config: None,

				// Note that we don't care about serializing handshake limits as we only ever serialize
				// channel data after the handshake has completed.
				inbound_handshake_limits_override: None,

				channel_id,
				temporary_channel_id,
				channel_state,
				announcement_sigs_state: announcement_sigs_state.unwrap(),
				secp_ctx,
				channel_value_satoshis,

				latest_monitor_update_id,

				holder_signer: ChannelSignerType::Ecdsa(holder_signer),
				shutdown_scriptpubkey,
				destination_script,

				cur_holder_commitment_transaction_number,
				cur_counterparty_commitment_transaction_number,
				value_to_self_msat,

				holder_max_accepted_htlcs,
				pending_inbound_htlcs,
				pending_outbound_htlcs,
				holding_cell_htlc_updates,

				resend_order,

				monitor_pending_channel_ready,
				monitor_pending_revoke_and_ack,
				monitor_pending_commitment_signed,
				monitor_pending_forwards,
				monitor_pending_failures,
				monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),

				signer_pending_commitment_update: false,
				signer_pending_funding: false,

				pending_update_fee,
				holding_cell_update_fee,
				next_holder_htlc_id,
				next_counterparty_htlc_id,
				update_time_counter,
				feerate_per_kw,

				#[cfg(debug_assertions)]
				holder_max_commitment_tx_output: Mutex::new((0, 0)),
				#[cfg(debug_assertions)]
				counterparty_max_commitment_tx_output: Mutex::new((0, 0)),

				last_sent_closing_fee: None,
				pending_counterparty_closing_signed: None,
				expecting_peer_commitment_signed: false,
				closing_fee_limits: None,
				target_closing_feerate_sats_per_kw,

				funding_tx_confirmed_in,
				funding_tx_confirmation_height,
				short_channel_id,
				channel_creation_height: channel_creation_height.unwrap(),

				counterparty_dust_limit_satoshis,
				holder_dust_limit_satoshis,
				counterparty_max_htlc_value_in_flight_msat,
				holder_max_htlc_value_in_flight_msat: holder_max_htlc_value_in_flight_msat.unwrap(),
				counterparty_selected_channel_reserve_satoshis,
				holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis.unwrap(),
				counterparty_htlc_minimum_msat,
				holder_htlc_minimum_msat,
				counterparty_max_accepted_htlcs,
				minimum_depth,

				counterparty_forwarding_info,

				channel_transaction_parameters: channel_parameters,
				funding_transaction,
				is_batch_funding,

				counterparty_cur_commitment_point,
				counterparty_prev_commitment_point,
				counterparty_node_id,

				counterparty_shutdown_scriptpubkey,

				commitment_secrets,

				channel_update_status,
				closing_signed_in_flight: false,

				announcement_sigs,

				#[cfg(any(test, fuzzing))]
				next_local_commitment_tx_fee_info_cached: Mutex::new(None),
				#[cfg(any(test, fuzzing))]
				next_remote_commitment_tx_fee_info_cached: Mutex::new(None),

				workaround_lnd_bug_4006: None,
				sent_message_awaiting_response: None,

				latest_inbound_scid_alias,
				// Later in the ChannelManager deserialization phase we scan for channels and assign scid aliases if it's missing
				outbound_scid_alias: outbound_scid_alias.unwrap_or(0),

				channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true),
				channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true),

				#[cfg(any(test, fuzzing))]
				historical_inbound_htlc_fulfills,

				channel_type: channel_type.unwrap(),
				channel_keys_id,

				blocked_monitor_updates: blocked_monitor_updates.unwrap(),
			}
		})
	}
}
#[cfg(test)]
mod tests {
	use bitcoin::blockdata::constants::ChainHash;
	use bitcoin::blockdata::script::{ScriptBuf, Builder};
	use bitcoin::blockdata::transaction::{Transaction, TxOut};
	use bitcoin::blockdata::opcodes;
	use bitcoin::network::constants::Network;
	use crate::ln::onion_utils::INVALID_ONION_BLINDING;
	use crate::ln::{PaymentHash, PaymentPreimage};
	use crate::ln::channel_keys::{RevocationKey, RevocationBasepoint};
	use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
	use crate::ln::channel::InitFeatures;
	use crate::ln::channel::{AwaitingChannelReadyFlags, Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, HTLCUpdateAwaitingACK, commit_tx_fee_msat};
	use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
	use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
	use crate::ln::msgs;
	use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
	use crate::ln::script::ShutdownScript;
	use crate::ln::chan_utils::{self, htlc_success_tx_weight, htlc_timeout_tx_weight};
	use crate::chain::BestBlock;
	use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
	use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
	use crate::chain::transaction::OutPoint;
	use crate::routing::router::{Path, RouteHop};
	use crate::util::config::UserConfig;
	use crate::util::errors::APIError;
	use crate::util::ser::{ReadableArgs, Writeable};
	use crate::util::test_utils;
	use crate::util::test_utils::{OnGetShutdownScriptpubkey, TestKeysInterface};
	use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
	use bitcoin::secp256k1::ffi::Signature as FFISignature;
	use bitcoin::secp256k1::{SecretKey,PublicKey};
	use bitcoin::hashes::sha256::Hash as Sha256;
	use bitcoin::hashes::Hash;
	use bitcoin::hashes::hex::FromHex;
	use bitcoin::hash_types::WPubkeyHash;
	use bitcoin::blockdata::locktime::absolute::LockTime;
	use bitcoin::address::{WitnessProgram, WitnessVersion};
	use crate::prelude::*;
	struct TestFeeEstimator {
		fee_est: u32
	}
	impl FeeEstimator for TestFeeEstimator {
		fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 {
			self.fee_est
		}
	}

	#[test]
	fn test_max_funding_satoshis_no_wumbo() {
		assert_eq!(TOTAL_BITCOIN_SUPPLY_SATOSHIS, 21_000_000 * 100_000_000);
		assert!(MAX_FUNDING_SATOSHIS_NO_WUMBO <= TOTAL_BITCOIN_SUPPLY_SATOSHIS,
			"MAX_FUNDING_SATOSHIS_NO_WUMBO is greater than all satoshis in existence");
	}
	struct Keys {
		signer: InMemorySigner,
	}

	impl EntropySource for Keys {
		fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }
	}

	impl SignerProvider for Keys {
		type EcdsaSigner = InMemorySigner;
		#[cfg(taproot)]
		type TaprootSigner = InMemorySigner;

		fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
			self.signer.channel_keys_id()
		}

		fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::EcdsaSigner {
			self.signer.clone()
		}

		fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::EcdsaSigner, DecodeError> { panic!(); }

		fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()> {
			let secp_ctx = Secp256k1::signing_only();
			let channel_monitor_claim_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
			let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
			Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(channel_monitor_claim_key_hash).into_script())
		}

		fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
			let secp_ctx = Secp256k1::signing_only();
			let channel_close_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
			Ok(ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key)))
		}
	}
	#[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
	fn public_from_secret_hex(secp_ctx: &Secp256k1<bitcoin::secp256k1::All>, hex: &str) -> PublicKey {
		PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&<Vec<u8>>::from_hex(hex).unwrap()[..]).unwrap())
	}

	#[test]
	fn upfront_shutdown_script_incompatibility() {
		let features = channelmanager::provided_init_features(&UserConfig::default()).clear_shutdown_anysegwit();
		let non_v0_segwit_shutdown_script = ShutdownScript::new_witness_program(
			&WitnessProgram::new(WitnessVersion::V16, &[0, 40]).unwrap(),
		).unwrap();

		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
		keys_provider.expect(OnGetShutdownScriptpubkey {
			returns: non_v0_segwit_shutdown_script.clone(),
		});

		let secp_ctx = Secp256k1::new();
		let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42, None) {
			Err(APIError::IncompatibleShutdownScript { script }) => {
				assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
			},
			Err(e) => panic!("Unexpected error: {:?}", e),
			Ok(_) => panic!("Expected error"),
		}
	}
	// Check that, during channel creation, we use the same feerate in the open channel message
	// as we do in the Channel object creation itself.
	#[test]
	fn test_open_channel_msg_fee() {
		let original_fee = 253;
		let mut fee_est = TestFeeEstimator{fee_est: original_fee };
		let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

		let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

		// Now change the fee so we can check that the fee in the open_channel message is the
		// same as the old fee.
		fee_est.fee_est = 500;
		let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
		assert_eq!(open_channel_msg.feerate_per_kw, original_fee);
	}
#[test]
fn test_holder_vs_counterparty_dust_limit() {
	// Test that when calculating the local and remote commitment transaction fees, the correct
	// dust limits are used.
	let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15000 });
	let secp_ctx = Secp256k1::new();
	let seed = [42; 32];
	let network = Network::Testnet;
	let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
	let logger = test_utils::TestLogger::new();
	let best_block = BestBlock::from_network(network);

	// Go through the flow of opening a channel between two nodes, making sure
	// they have different dust limits.

	// Create Node A's channel pointing to Node B's pubkey
	let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
	let config = UserConfig::default();
	let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

	// Create Node B's channel by receiving Node A's open_channel message
	// Make sure A's dust limit is as we expect.
	let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
	let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
	let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();

	// Node B --> Node A: accept channel, explicitly setting B's dust limit.
	let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
	accept_channel_msg.dust_limit_satoshis = 546;
	node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
	node_a_chan.context.holder_dust_limit_satoshis = 1560;

	// Node A --> Node B: funding created
	let output_script = node_a_chan.context.get_funding_redeemscript();
	let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
		value: 10000000, script_pubkey: output_script.clone(),
	}]};
	let funding_outpoint = OutPoint { txid: tx.txid(), index: 0 };
	let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
	let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();

	// Node B --> Node A: funding signed
	let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
	let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };

	// Put some inbound and outbound HTLCs in A's channel.
	let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
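	// (A sketch of the arithmetic, assuming the default non-anchor channel type negotiated
	// here: at the 15000 sat/kW feerate above, an HTLC-timeout tx costs 15_000 * 663 / 1_000 =
	// 9_945 sat and an HTLC-success tx costs 15_000 * 703 / 1_000 = 10_545 sat. A's effective
	// dust thresholds are thus 1560 + 9_945 = 11_505 sat (offered) and 1560 + 10_545 = 12_105
	// sat (received), while B's are 546 + 9_945 = 10_491 sat and 546 + 10_545 = 11_091 sat.
	// An 11_092 sat HTLC is therefore dust on A's commitment in both directions but non-dust
	// on B's.)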
	node_a_chan.context.pending_inbound_htlcs.push(InboundHTLCOutput {
		htlc_id: 0,
		amount_msat: htlc_amount_msat,
		payment_hash: PaymentHash(Sha256::hash(&[42; 32]).to_byte_array()),
		cltv_expiry: 300000000,
		state: InboundHTLCState::Committed,
	});
	node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
		htlc_id: 1,
		amount_msat: htlc_amount_msat, // put an amount below A's dust amount but above B's.
		payment_hash: PaymentHash(Sha256::hash(&[43; 32]).to_byte_array()),
		cltv_expiry: 200000000,
		state: OutboundHTLCState::Committed,
		source: HTLCSource::OutboundRoute {
			path: Path { hops: Vec::new(), blinded_tail: None },
			session_priv: SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
			first_hop_htlc_msat: 548,
			payment_id: PaymentId([42; 32]),
		},
		skimmed_fee_msat: None,
		blinding_point: None,
	});

	// Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass
	// the dust limit check.
	let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
	let local_commit_tx_fee = node_a_chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
	let local_commit_fee_0_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.get_channel_type());
	assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs);

	// Finally, make sure that when Node A calculates the remote's commitment transaction fees, all
	// of the HTLCs are seen to be above the dust limit.
	node_a_chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
	let remote_commit_fee_3_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.get_channel_type());
	let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
	let remote_commit_tx_fee = node_a_chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
	assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs);
}
#[test]
fn test_timeout_vs_success_htlc_dust_limit() {
	// Make sure that when `next_remote_commit_tx_fee_msat` and `next_local_commit_tx_fee_msat`
	// calculate the real dust limits for HTLCs (i.e. the dust limit given by the counterparty
	// *plus* the fees paid for the HTLC) they don't swap `HTLC_SUCCESS_TX_WEIGHT` for
	// `HTLC_TIMEOUT_TX_WEIGHT`, and vice versa.
	let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 });
	let secp_ctx = Secp256k1::new();
	let seed = [42; 32];
	let network = Network::Testnet;
	let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

	let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
	let config = UserConfig::default();
	let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

	let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.get_channel_type());
	let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.get_channel_type());
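	// (For reference, assuming the default non-anchor weights: at 253 sat/kW the second-stage
	// fees are 253 * 663 / 1000 = 167 sat for an HTLC-timeout tx and 253 * 703 / 1000 = 177 sat
	// for an HTLC-success tx, so each candidate amount below sits exactly one satoshi above or
	// below the relevant dust threshold.)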
	// If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped: then this HTLC would be
	// counted as dust when it shouldn't be.
	let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000;
	let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
	let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
	assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);

	// If swapped: this HTLC would be counted as non-dust when it shouldn't be.
	let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000;
	let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered);
	let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
	assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);

	chan.context.channel_transaction_parameters.is_outbound_from_holder = false;

	// If swapped: this HTLC would be counted as non-dust when it shouldn't be.
	let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000;
	let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
	let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
	assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);

	// If swapped: this HTLC would be counted as dust when it shouldn't be.
	let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000;
	let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered);
	let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
	assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
}
#[test]
fn channel_reestablish_no_updates() {
	let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15000 });
	let logger = test_utils::TestLogger::new();
	let secp_ctx = Secp256k1::new();
	let seed = [42; 32];
	let network = Network::Testnet;
	let best_block = BestBlock::from_network(network);
	let chain_hash = ChainHash::using_genesis_block(network);
	let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

	// Go through the flow of opening a channel between two nodes.

	// Create Node A's channel pointing to Node B's pubkey
	let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
	let config = UserConfig::default();
	let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

	// Create Node B's channel by receiving Node A's open_channel message
	let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
	let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
	let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();

	// Node B --> Node A: accept channel
	let accept_channel_msg = node_b_chan.accept_inbound_channel();
	node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();

	// Node A --> Node B: funding created
	let output_script = node_a_chan.context.get_funding_redeemscript();
	let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
		value: 10000000, script_pubkey: output_script.clone(),
	}]};
	let funding_outpoint = OutPoint { txid: tx.txid(), index: 0 };
	let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
	let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();

	// Node B --> Node A: funding signed
	let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
	let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };

	// Now disconnect the two nodes and check that the commitment point in
	// Node B's channel_reestablish message is sane.
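	// (Nothing beyond the initial commitment transaction has been exchanged or revoked at this
	// point, so each side should expect commitment number 1 next, expect no revocations from
	// its peer, and report an all-zero last-seen per-commitment secret.)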
	assert!(node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
	let msg = node_b_chan.get_channel_reestablish(&&logger);
	assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
	assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
	assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);

	// Check that the commitment point in Node A's channel_reestablish message is sane.
	assert!(node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
	let msg = node_a_chan.get_channel_reestablish(&&logger);
	assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
	assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
	assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
}
#[test]
fn test_configured_holder_max_htlc_value_in_flight() {
	let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15000 });
	let logger = test_utils::TestLogger::new();
	let secp_ctx = Secp256k1::new();
	let seed = [42; 32];
	let network = Network::Testnet;
	let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
	let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
	let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());

	let mut config_2_percent = UserConfig::default();
	config_2_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 2;
	let mut config_99_percent = UserConfig::default();
	config_99_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 99;
	let mut config_0_percent = UserConfig::default();
	config_0_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 0;
	let mut config_101_percent = UserConfig::default();
	config_101_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 101;
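	// (The config value is read as a percentage of the channel value and, as the cases below
	// exercise, is effectively clamped to the [1, 100] range before being applied, i.e.
	// holder_max_htlc_value_in_flight_msat = channel_value_msat * clamped_percent / 100.)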
	// Test that `OutboundV1Channel::new` creates a channel with the correct value for
	// `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
	// which is set to the lower bound + 1 (2%) of the `channel_value`.
	let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42, None).unwrap();
	let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000;
	assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64);

	// Test with the upper bound - 1 of valid values (99%).
	let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42, None).unwrap();
	let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000;
	assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);

	let chan_1_open_channel_msg = chan_1.get_open_channel(ChainHash::using_genesis_block(network));

	// Test that `InboundV1Channel::new` creates a channel with the correct value for
	// `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
	// which is set to the lower bound + 1 (2%) of the `channel_value`.
	let chan_3 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
	let chan_3_value_msat = chan_3.context.channel_value_satoshis * 1000;
	assert_eq!(chan_3.context.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64);

	// Test with the upper bound - 1 of valid values (99%).
	let chan_4 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
	let chan_4_value_msat = chan_4.context.channel_value_satoshis * 1000;
	assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64);

	// Test that `OutboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
	// if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
	let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42, None).unwrap();
	let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000;
	assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64);

	// Test that `OutboundV1Channel::new` uses the upper bound of the configurable percentage values
	// (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
	// than 100.
	let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42, None).unwrap();
	let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000;
	assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);

	// Test that `InboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
	// if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
	let chan_7 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
	let chan_7_value_msat = chan_7.context.channel_value_satoshis * 1000;
	assert_eq!(chan_7.context.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64);

	// Test that `InboundV1Channel::new` uses the upper bound of the configurable percentage values
	// (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
	// than 100.
	let chan_8 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
	let chan_8_value_msat = chan_8.context.channel_value_satoshis * 1000;
	assert_eq!(chan_8.context.holder_max_htlc_value_in_flight_msat, chan_8_value_msat);
}
#[test]
fn test_configured_holder_selected_channel_reserve_satoshis() {
	// Test that `OutboundV1Channel::new` and `InboundV1Channel::new` create a channel with the correct
	// channel reserves, when `their_channel_reserve_proportional_millionths` is configured.
	test_self_and_counterparty_channel_reserve(10_000_000, 0.02, 0.02);

	// Test with valid but unreasonably high channel reserves
	// Requesting and accepting parties have requested for 49%-49% and 60%-30% channel reserve
	test_self_and_counterparty_channel_reserve(10_000_000, 0.49, 0.49);
	test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.30);

	// Test with calculated channel reserve less than lower bound
	// i.e. `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
	test_self_and_counterparty_channel_reserve(100_000, 0.00002, 0.30);

	// Test with invalid channel reserves since sum of both is greater than or equal
	// to channel_value
	test_self_and_counterparty_channel_reserve(10_000_000, 0.50, 0.50);
	test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.50);
}
fn test_self_and_counterparty_channel_reserve(channel_value_satoshis: u64, outbound_selected_channel_reserve_perc: f64, inbound_selected_channel_reserve_perc: f64) {
	let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15_000 });
	let logger = test_utils::TestLogger::new();
	let secp_ctx = Secp256k1::new();
	let seed = [42; 32];
	let network = Network::Testnet;
	let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
	let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
	let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());

	let mut outbound_node_config = UserConfig::default();
	outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
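	// (Each side's selected reserve is derived from `their_channel_reserve_proportional_millionths`
	// as channel_value_satoshis * millionths / 1_000_000, floored at
	// `MIN_THEIR_CHAN_RESERVE_SATOSHIS`; the `expected_*_chan_reserve` values below recompute
	// the same quantity from the raw percentage.)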
	let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42, None).unwrap();

	let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
	assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);

	let chan_open_channel_msg = chan.get_open_channel(ChainHash::using_genesis_block(network));
	let mut inbound_node_config = UserConfig::default();
	inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;

	if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 {
		let chan_inbound_node = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false).unwrap();

		let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64);

		assert_eq!(chan_inbound_node.context.holder_selected_channel_reserve_satoshis, expected_inbound_selected_chan_reserve);
		assert_eq!(chan_inbound_node.context.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve);
	} else {
		// Channel negotiations failed
		let result = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false);
		assert!(result.is_err());
	}
}
#[test]
fn channel_update() {
	let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15000 });
	let logger = test_utils::TestLogger::new();
	let secp_ctx = Secp256k1::new();
	let seed = [42; 32];
	let network = Network::Testnet;
	let best_block = BestBlock::from_network(network);
	let chain_hash = ChainHash::using_genesis_block(network);
	let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

	// Create Node A's channel pointing to Node B's pubkey
	let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
	let config = UserConfig::default();
	let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

	// Create Node B's channel by receiving Node A's open_channel message
	// Make sure A's dust limit is as we expect.
	let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
	let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
	let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();

	// Node B --> Node A: accept channel, explicitly setting B's dust limit.
	let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
	accept_channel_msg.dust_limit_satoshis = 546;
	node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
	node_a_chan.context.holder_dust_limit_satoshis = 1560;

	// Node A --> Node B: funding created
	let output_script = node_a_chan.context.get_funding_redeemscript();
	let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
		value: 10000000, script_pubkey: output_script.clone(),
	}]};
	let funding_outpoint = OutPoint { txid: tx.txid(), index: 0 };
	let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
	let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();

	// Node B --> Node A: funding signed
	let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
	let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };

	// Make sure that receiving a channel update will update the Channel as expected.
	let update = ChannelUpdate {
		contents: UnsignedChannelUpdate {
			chain_hash,
			short_channel_id: 0,
			timestamp: 0,
			flags: 0,
			cltv_expiry_delta: 100,
			htlc_minimum_msat: 5,
			htlc_maximum_msat: MAX_VALUE_MSAT,
			fee_base_msat: 110,
			fee_proportional_millionths: 11,
			excess_data: Vec::new(),
		},
		signature: Signature::from(unsafe { FFISignature::new() })
	};
	assert!(node_a_chan.channel_update(&update).unwrap());

	// The counterparty can send an update with a higher minimum HTLC, but that shouldn't
	// change our official htlc_minimum_msat.
	assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1);
	match node_a_chan.context.counterparty_forwarding_info() {
		Some(info) => {
			assert_eq!(info.cltv_expiry_delta, 100);
			assert_eq!(info.fee_base_msat, 110);
			assert_eq!(info.fee_proportional_millionths, 11);
		},
		None => panic!("expected counterparty forwarding info to be Some")
	}

	// Applying the same update a second time changes nothing, so it should report no update.
	assert!(!node_a_chan.channel_update(&update).unwrap());
}
#[test]
fn blinding_point_skimmed_fee_malformed_ser() {
	// Ensure that channel blinding points, skimmed fees, and malformed HTLCs are (de)serialized
	// properly.
	let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15000 });
	let secp_ctx = Secp256k1::new();
	let seed = [42; 32];
	let network = Network::Testnet;
	let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

	let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
	let config = UserConfig::default();
	let features = channelmanager::provided_init_features(&config);
	let outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None).unwrap();
	let mut chan = Channel { context: outbound_chan.context };

	let dummy_htlc_source = HTLCSource::OutboundRoute {
		path: Path {
			hops: vec![RouteHop {
				pubkey: test_utils::pubkey(2), channel_features: ChannelFeatures::empty(),
				node_features: NodeFeatures::empty(), short_channel_id: 0, fee_msat: 0,
				cltv_expiry_delta: 0, maybe_announced_channel: false,
			}],
			blinded_tail: None
		},
		session_priv: test_utils::privkey(42),
		first_hop_htlc_msat: 0,
		payment_id: PaymentId([42; 32]),
	};
	let dummy_outbound_output = OutboundHTLCOutput {
		htlc_id: 0,
		amount_msat: 0,
		payment_hash: PaymentHash([43; 32]),
		cltv_expiry: 0,
		state: OutboundHTLCState::Committed,
		source: dummy_htlc_source.clone(),
		skimmed_fee_msat: None,
		blinding_point: None,
	};
	let mut pending_outbound_htlcs = vec![dummy_outbound_output.clone(); 10];
	for (idx, htlc) in pending_outbound_htlcs.iter_mut().enumerate() {
		if idx % 2 == 0 {
			htlc.blinding_point = Some(test_utils::pubkey(42 + idx as u8));
		}
		if idx % 3 == 0 {
			htlc.skimmed_fee_msat = Some(1);
		}
	}
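	// (The two moduli deliberately overlap, so the vector ends up containing HTLCs with a
	// blinding point only, a skimmed fee only, both fields set, and neither set.)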
	chan.context.pending_outbound_htlcs = pending_outbound_htlcs.clone();

	let dummy_holding_cell_add_htlc = HTLCUpdateAwaitingACK::AddHTLC {
		amount_msat: 0,
		cltv_expiry: 0,
		payment_hash: PaymentHash([43; 32]),
		source: dummy_htlc_source.clone(),
		onion_routing_packet: msgs::OnionPacket {
			version: 0,
			public_key: Ok(test_utils::pubkey(1)),
			hop_data: [0; 20*65],
			hmac: [0; 32],
		},
		skimmed_fee_msat: None,
		blinding_point: None,
	};
	let dummy_holding_cell_claim_htlc = HTLCUpdateAwaitingACK::ClaimHTLC {
		payment_preimage: PaymentPreimage([42; 32]),
		htlc_id: 0,
	};
	let dummy_holding_cell_failed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailHTLC {
		htlc_id, err_packet: msgs::OnionErrorPacket { data: vec![42] }
	};
	let dummy_holding_cell_malformed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailMalformedHTLC {
		htlc_id, failure_code: INVALID_ONION_BLINDING, sha256_of_onion: [0; 32],
	};
	let mut holding_cell_htlc_updates = Vec::with_capacity(12);
	for i in 0..12 {
		if i % 5 == 0 {
			holding_cell_htlc_updates.push(dummy_holding_cell_add_htlc.clone());
		} else if i % 5 == 1 {
			holding_cell_htlc_updates.push(dummy_holding_cell_claim_htlc.clone());
		} else if i % 5 == 2 {
			let mut dummy_add = dummy_holding_cell_add_htlc.clone();
			if let HTLCUpdateAwaitingACK::AddHTLC {
				ref mut blinding_point, ref mut skimmed_fee_msat, ..
			} = &mut dummy_add {
				*blinding_point = Some(test_utils::pubkey(42 + i));
				*skimmed_fee_msat = Some(42);
			} else { panic!() }
			holding_cell_htlc_updates.push(dummy_add);
		} else if i % 5 == 3 {
			holding_cell_htlc_updates.push(dummy_holding_cell_malformed_htlc(i as u64));
		} else {
			holding_cell_htlc_updates.push(dummy_holding_cell_failed_htlc(i as u64));
		}
	}
	chan.context.holding_cell_htlc_updates = holding_cell_htlc_updates.clone();
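	// (These fields postdate the original channel serialization format, so they travel in
	// optional TLV extensions; the round-trip below checks that none of them are silently
	// dropped on read.)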
	// Encode and decode the channel and ensure that the HTLCs within are the same.
	let encoded_chan = chan.encode();
	let mut s = crate::io::Cursor::new(&encoded_chan);
	let mut reader = crate::util::ser::FixedLengthReader::new(&mut s, encoded_chan.len() as u64);
	let features = channelmanager::provided_channel_type_features(&config);
	let decoded_chan = Channel::read(&mut reader, (&&keys_provider, &&keys_provider, 0, &features)).unwrap();
	assert_eq!(decoded_chan.context.pending_outbound_htlcs, pending_outbound_htlcs);
	assert_eq!(decoded_chan.context.holding_cell_htlc_updates, holding_cell_htlc_updates);
}
#[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
#[test]
fn outbound_commitment_test() {
	use bitcoin::sighash;
	use bitcoin::consensus::encode::serialize;
	use bitcoin::sighash::EcdsaSighashType;
	use bitcoin::hashes::hex::FromHex;
	use bitcoin::hash_types::Txid;
	use bitcoin::secp256k1::Message;
	use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, ecdsa::EcdsaChannelSigner};
	use crate::ln::PaymentPreimage;
	use crate::ln::channel::{HTLCOutputInCommitment, TxCreationKeys};
	use crate::ln::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint};
	use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
	use crate::util::logger::Logger;
	use crate::sync::Arc;
	use core::str::FromStr;
	use hex::DisplayHex;

	// Test vectors from BOLT 3 Appendices C and F (anchors):
	let feeest = TestFeeEstimator { fee_est: 15000 };
	let logger : Arc<dyn Logger> = Arc::new(test_utils::TestLogger::new());
	let secp_ctx = Secp256k1::new();

	let mut signer = InMemorySigner::new(
		&secp_ctx,
		SecretKey::from_slice(&<Vec<u8>>::from_hex("30ff4956bbdd3222d44cc5e8a1261dab1e07957bdac5ae88fe3261ef321f3749").unwrap()[..]).unwrap(),
		SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
		SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
		SecretKey::from_slice(&<Vec<u8>>::from_hex("3333333333333333333333333333333333333333333333333333333333333333").unwrap()[..]).unwrap(),
		SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
		// These aren't set in the test vectors:
		[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff],
		10_000_000,
		[0; 32],
		[0; 32],
	);

	assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..],
			<Vec<u8>>::from_hex("023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb").unwrap()[..]);
	let keys_provider = Keys { signer: signer.clone() };

	let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
	let mut config = UserConfig::default();
	config.channel_handshake_config.announced_channel = false;
	let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42, None).unwrap(); // Nothing uses their network key in this test
	chan.context.holder_dust_limit_satoshis = 546;
	chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in in accept_channel

	let funding_info = OutPoint { txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };

	let counterparty_pubkeys = ChannelPublicKeys {
		funding_pubkey: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
		revocation_basepoint: RevocationBasepoint::from(PublicKey::from_slice(&<Vec<u8>>::from_hex("02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27").unwrap()[..]).unwrap()),
		payment_point: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"),
		delayed_payment_basepoint: DelayedPaymentBasepoint::from(public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13")),
		htlc_basepoint: HtlcBasepoint::from(public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"))
	};
	chan.context.channel_transaction_parameters.counterparty_parameters = Some(
		CounterpartyChannelTransactionParameters {
			pubkeys: counterparty_pubkeys.clone(),
			selected_contest_delay: 144
		});
	chan.context.channel_transaction_parameters.funding_outpoint = Some(funding_info);
	signer.provide_channel_parameters(&chan.context.channel_transaction_parameters);

	assert_eq!(counterparty_pubkeys.payment_point.serialize()[..],
			<Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);

	assert_eq!(counterparty_pubkeys.funding_pubkey.serialize()[..],
			<Vec<u8>>::from_hex("030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c1").unwrap()[..]);

	assert_eq!(counterparty_pubkeys.htlc_basepoint.to_public_key().serialize()[..],
			<Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);

	// We can't just use build_holder_transaction_keys here as the per_commitment_secret is not
	// derived from a commitment_seed, so instead we copy it here and call
	// build_commitment_transaction.
	let delayed_payment_base = &chan.context.holder_signer.as_ref().pubkeys().delayed_payment_basepoint;
	let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
	let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
	let htlc_basepoint = &chan.context.holder_signer.as_ref().pubkeys().htlc_basepoint;
	let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint);
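	// (`derive_new` combines the holder's delayed-payment and HTLC basepoints with the
	// counterparty's revocation and HTLC basepoints at this per-commitment point, yielding the
	// per-commitment key set used to build the holder's commitment transactions below.)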
	macro_rules! test_commitment {
		( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
			chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::only_static_remote_key();
			test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::only_static_remote_key(), $($remain)*);
		};
	}

	macro_rules! test_commitment_with_anchors {
		( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
			chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
			test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), $($remain)*);
		};
	}

	macro_rules! test_commitment_common {
		( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $opt_anchors: expr, {
			$( { $htlc_idx: expr, $counterparty_htlc_sig_hex: expr, $htlc_sig_hex: expr, $htlc_tx_hex: expr } ), *
		} ) => { {
			let (commitment_tx, htlcs): (_, Vec<HTLCOutputInCommitment>) = {
				let mut commitment_stats = chan.context.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, &logger);

				let htlcs = commitment_stats.htlcs_included.drain(..)
					.filter_map(|(htlc, _)| if htlc.transaction_output_index.is_some() { Some(htlc) } else { None })
					.collect();
				(commitment_stats.tx, htlcs)
			};
			let trusted_tx = commitment_tx.trust();
			let unsigned_tx = trusted_tx.built_transaction();
			let redeemscript = chan.context.get_funding_redeemscript();
			let counterparty_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_sig_hex).unwrap()[..]).unwrap();
			let sighash = unsigned_tx.get_sighash_all(&redeemscript, chan.context.channel_value_satoshis);
			log_trace!(logger, "unsigned_tx = {}", serialize(&unsigned_tx.transaction).as_hex());
			assert!(secp_ctx.verify_ecdsa(&sighash, &counterparty_signature, chan.context.counterparty_funding_pubkey()).is_ok(), "verify counterparty commitment sig");

			let mut per_htlc: Vec<(HTLCOutputInCommitment, Option<Signature>)> = Vec::new();
			per_htlc.clear(); // Don't warn about excess mut for no-HTLC calls
			let mut counterparty_htlc_sigs = Vec::new();
			counterparty_htlc_sigs.clear(); // Don't warn about excess mut for no-HTLC calls
			$({
				let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
				per_htlc.push((htlcs[$htlc_idx].clone(), Some(remote_signature)));
				counterparty_htlc_sigs.push(remote_signature);
			})*
			assert_eq!(htlcs.len(), per_htlc.len());

			let holder_commitment_tx = HolderCommitmentTransaction::new(
				commitment_tx.clone(),
				counterparty_signature,
				counterparty_htlc_sigs,
				&chan.context.holder_signer.as_ref().pubkeys().funding_pubkey,
				chan.context.counterparty_funding_pubkey()
			);
			let holder_sig = signer.sign_holder_commitment(&holder_commitment_tx, &secp_ctx).unwrap();
			assert_eq!(Signature::from_der(&<Vec<u8>>::from_hex($sig_hex).unwrap()[..]).unwrap(), holder_sig, "holder_sig");

			let funding_redeemscript = chan.context.get_funding_redeemscript();
			let tx = holder_commitment_tx.add_holder_sig(&funding_redeemscript, holder_sig);
			assert_eq!(serialize(&tx)[..], <Vec<u8>>::from_hex($tx_hex).unwrap()[..], "tx");

			// ((htlc, counterparty_sig), (index, holder_sig))
			let mut htlc_counterparty_sig_iter = holder_commitment_tx.counterparty_htlc_sigs.iter();

			$({
				log_trace!(logger, "verifying htlc {}", $htlc_idx);
				let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();

				let ref htlc = htlcs[$htlc_idx];
				let mut htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw,
					chan.context.get_counterparty_selected_contest_delay().unwrap(),
					&htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
				let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
				let htlc_sighashtype = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
				let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
				assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key.to_public_key()).is_ok(), "verify counterparty htlc sig");

				let mut preimage: Option<PaymentPreimage> = None;
				if !htlc.offered {
					for i in 0..5 {
						let out = PaymentHash(Sha256::hash(&[i; 32]).to_byte_array());
						if out == htlc.payment_hash {
							preimage = Some(PaymentPreimage([i; 32]));
						}
					}

					assert!(preimage.is_some());
				}

				let htlc_counterparty_sig = htlc_counterparty_sig_iter.next().unwrap();
				let htlc_holder_sig = signer.sign_holder_htlc_transaction(&htlc_tx, 0, &HTLCDescriptor {
					channel_derivation_parameters: ChannelDerivationParameters {
						value_satoshis: chan.context.channel_value_satoshis,
						keys_id: chan.context.channel_keys_id,
						transaction_parameters: chan.context.channel_transaction_parameters.clone(),
					},
					commitment_txid: trusted_tx.txid(),
					per_commitment_number: trusted_tx.commitment_number(),
					per_commitment_point: trusted_tx.per_commitment_point(),
					feerate_per_kw: trusted_tx.feerate_per_kw(),
					htlc: htlc.clone(),
					preimage: preimage.clone(),
					counterparty_sig: *htlc_counterparty_sig,
				}, &secp_ctx).unwrap();
				let num_anchors = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { 2 } else { 0 };
				assert_eq!(htlc.transaction_output_index, Some($htlc_idx + num_anchors), "output index");

				let signature = Signature::from_der(&<Vec<u8>>::from_hex($htlc_sig_hex).unwrap()[..]).unwrap();
				assert_eq!(signature, htlc_holder_sig, "htlc sig");
				let trusted_tx = holder_commitment_tx.trust();
				htlc_tx.input[0].witness = trusted_tx.build_htlc_input_witness($htlc_idx, htlc_counterparty_sig, &htlc_holder_sig, &preimage);
				log_trace!(logger, "htlc_tx = {}", serialize(&htlc_tx).as_hex());
				assert_eq!(serialize(&htlc_tx)[..], <Vec<u8>>::from_hex($htlc_tx_hex).unwrap()[..], "htlc tx");
			})*
			assert!(htlc_counterparty_sig_iter.next().is_none());
		} }
	}
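	// (Each `test_commitment!` / `test_commitment_with_anchors!` case below checks the
	// counterparty's commitment signature, our own signature, and the fully-signed commitment
	// transaction against the BOLT 3 vectors, then does the same for every second-stage HTLC
	// transaction the commitment contains.)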
9177 // anchors: simple commitment tx with no HTLCs and single anchor
9178 test_commitment_with_anchors!("30440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a8658",
9179 "3044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778",
9180 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f10529800000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778014730440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a865801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9182 // simple commitment tx with no HTLCs
9183 chan.context.value_to_self_msat = 7000000000;
9185 test_commitment!("3045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b0",
9186 "30440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae055647142",
9187 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48454a56a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae05564714201483045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9189 // anchors: simple commitment tx with no HTLCs
9190 test_commitment_with_anchors!("3045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f3",
9191 "30450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f7",
9192 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a508b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f701483045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9194 chan.context.pending_inbound_htlcs.push({
9195 let mut out = InboundHTLCOutput{
9197 amount_msat: 1000000,
9199 payment_hash: PaymentHash([0; 32]),
9200 state: InboundHTLCState::Committed,
9202 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap()).to_byte_array();
9205 chan.context.pending_inbound_htlcs.push({
9206 let mut out = InboundHTLCOutput{
9208 amount_msat: 2000000,
9210 payment_hash: PaymentHash([0; 32]),
9211 state: InboundHTLCState::Committed,
9213 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
9216 chan.context.pending_outbound_htlcs.push({
9217 let mut out = OutboundHTLCOutput{
9219 amount_msat: 2000000,
9221 payment_hash: PaymentHash([0; 32]),
9222 state: OutboundHTLCState::Committed,
9223 source: HTLCSource::dummy(),
9224 skimmed_fee_msat: None,
9225 blinding_point: None,
9227 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).to_byte_array();
9230 chan.context.pending_outbound_htlcs.push({
9231 let mut out = OutboundHTLCOutput{
9233 amount_msat: 3000000,
9235 payment_hash: PaymentHash([0; 32]),
9236 state: OutboundHTLCState::Committed,
9237 source: HTLCSource::dummy(),
9238 skimmed_fee_msat: None,
9239 blinding_point: None,
9241 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).to_byte_array();
9244 chan.context.pending_inbound_htlcs.push({
9245 let mut out = InboundHTLCOutput{
9247 amount_msat: 4000000,
9249 payment_hash: PaymentHash([0; 32]),
9250 state: InboundHTLCState::Committed,
9252 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0404040404040404040404040404040404040404040404040404040404040404").unwrap()).to_byte_array();
9256 // commitment tx with all five HTLCs untrimmed (minimum feerate)
9257 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9258 chan.context.feerate_per_kw = 0;
9260 test_commitment!("3044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e5",
9261 "304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea",
9262 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e0a06a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea01473044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9265 "3045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b",
9266 "30440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce",
9267 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b00000000000000000001e8030000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b014730440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
9270 "30440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f896004",
9271 "3045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f",
9272 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b01000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f89600401483045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9275 "30440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d43352",
9276 "3045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa",
9277 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b02000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d4335201483045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9280 "304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c1363",
9281 "304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac7487",
9282 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b03000000000000000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c13630147304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac748701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9285 "3044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b87",
9286 "3045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95",
9287 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b04000000000000000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b8701483045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
// commitment tx with seven outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 647;

test_commitment!("3045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee",
"30450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb7",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e09c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb701483045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"30450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f",
"30440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b",
"020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f014730440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },

{ 1,
"304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c",
"30450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f",
"020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0100000000000000000124060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c014830450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },

{ 2,
"30440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c8",
"3045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673",
"020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe020000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c801483045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },

{ 3,
"304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c64",
"3045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee",
"020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe030000000000000000010c0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c6401483045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

{ 4,
"30450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca",
"3044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9",
"020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe04000000000000000001da0d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca01473044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
} );

// commitment tx with six outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 648;

test_commitment!("304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e5",
"3045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e9",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4844e9d6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e90147304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"3045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e0",
"304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec6",
"020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10000000000000000000123060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e00148304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },

{ 1,
"304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d384124",
"3044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae",
"020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10100000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d38412401473044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },

{ 2,
"304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc625532989",
"3045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e2260796",
"020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf1020000000000000000010b0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc62553298901483045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e226079601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

{ 3,
"3044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be",
"3045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6",
"020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf103000000000000000001d90d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be01483045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
} );

// anchors: commitment tx with six outputs untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 645;
chan.context.holder_dust_limit_satoshis = 1001;

test_commitment_with_anchors!("3044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc312",
"3045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d0051",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80084a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837ead007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5eb80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994abc996a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d005101473044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc31201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"3045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc282",
"3045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef09",
"02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320002000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc28283483045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef0901008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000" },

{ 1,
"3045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e",
"3045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac",
"02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320003000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e83483045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },

{ 2,
"3044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c1",
"304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e",
"02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320004000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c18347304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },

{ 3,
"3044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc615",
"3045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226",
"02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320005000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc61583483045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
} );

// commitment tx with six outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2069;
chan.context.holder_dust_limit_satoshis = 546;

test_commitment!("304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc",
"3045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e33",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48477956a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e330148304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"3045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f699",
"3045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d",
"02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0000000000000000000175020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f69901483045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },

{ 1,
"3045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df",
"3045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61",
"02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0100000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df01483045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },

{ 2,
"3045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e0",
"3044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c18",
"02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d020000000000000000015d060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e001473044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c1801008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

{ 3,
"304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df",
"3045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33",
"02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d03000000000000000001f2090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df01483045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
} );

// commitment tx with five outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2070;

test_commitment!("304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c",
"3044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c1",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484da966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c10147304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b",
"30440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e5",
"02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff0000000000000000000174020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b014730440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e501008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },

{ 1,
"3045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546",
"30450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c6",
"02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff010000000000000000015c060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546014830450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

{ 2,
"3045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504",
"30440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502",
"02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff02000000000000000001f1090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504014730440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
} );

// commitment tx with five outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2194;

test_commitment!("304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb3",
"3044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d398",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48440966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d3980147304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"3045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de8450",
"304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e0154301",
"02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de84500148304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e015430101008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },

{ 1,
"3044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a0",
"3045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b77",
"02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd010000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a001483045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b7701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

{ 2,
"3045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b",
"30450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3",
"02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd020000000000000000019a090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b014830450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
} );

// commitment tx with four outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2195;

test_commitment!("304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce403",
"3044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d1767",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484b8976a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d17670147304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce40301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"3045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e",
"3045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d76",
"020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900000000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e01483045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d7601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

{ 1,
"3045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a",
"3044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82",
"020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900100000000000000000199090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a01473044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
} );

// anchors: commitment tx with four outputs untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2185;
chan.context.holder_dust_limit_satoshis = 2001;
let cached_channel_type = chan.context.channel_type;
chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();

test_commitment_with_anchors!("3044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c0",
"3045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd5",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80064a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994b80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ac5916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd501473044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb",
"30440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f",
"02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc02000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb834730440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },

{ 1,
"3045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd7271",
"3045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688",
"02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc03000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd727183483045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
} );

// commitment tx with four outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 3702;
chan.context.holder_dust_limit_satoshis = 546;
chan.context.channel_type = cached_channel_type.clone();

test_commitment!("304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee40169",
"3045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf75",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4846f916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf750148304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee4016901475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb89",
"304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f",
"020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb890147304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

{ 1,
"3044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb5817",
"304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc",
"020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0100000000000000000176050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb58170147304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
} );

// commitment tx with three outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 3703;

test_commitment!("3045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e",
"304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e1",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484eb936a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e101483045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"3045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a",
"3045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05",
"0200000000010120060e4a29579d429f0f27c17ee5f1ee282f20d706d6f90b63d35946d8f3029a0000000000000000000175050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a01483045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
} );

// anchors: commitment tx with three outputs untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 3687;
chan.context.holder_dust_limit_satoshis = 3001;
chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();

test_commitment_with_anchors!("3045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c228377",
"3045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80054a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aa28b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d01483045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c22837701475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"3044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c",
"3045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de",
"02000000000101542562b326c08e3a076d9cfca2be175041366591da334d8d513ff1686fd95a6002000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c83483045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
} );

// commitment tx with three outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 4914;
chan.context.holder_dust_limit_satoshis = 546;
chan.context.channel_type = cached_channel_type.clone();

test_commitment!("3045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c95244",
"3045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c19",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484ae8f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c1901483045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c9524401475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"3045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374",
"30440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10",
"02000000000101a9172908eace869cc35128c31fc2ab502f72e4dff31aab23e0244c4b04b11ab00000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374014730440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
} );

// commitment tx with two outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 4915;
chan.context.holder_dust_limit_satoshis = 546;

test_commitment!("304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a720",
"30450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf5",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484fa926a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf50147304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a72001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});

// anchors: commitment tx with two outputs untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 4894;
chan.context.holder_dust_limit_satoshis = 4001;
chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();

test_commitment_with_anchors!("3045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b95",
"30450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ad0886a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd01483045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b9501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});

// commitment tx with two outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 9651180;
chan.context.holder_dust_limit_satoshis = 546;
chan.context.channel_type = cached_channel_type.clone();

test_commitment!("304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd3",
"3045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b800222020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80ec0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4840400483045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de0147304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});

// commitment tx with one output untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 9651181;

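// At this feerate the funder's to_local output would fall below the dust
// limit and is itself trimmed, leaving a single 3,000,000-sat to_remote
// output.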
test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
"304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});

	// anchors: commitment tx with one output untrimmed (minimum dust limit)
	chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
	chan.context.feerate_per_kw = 6216010;
	chan.context.holder_dust_limit_satoshis = 4001;
	chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
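	// At this feerate/dust-limit combination the to_local output is trimmed as well, so
	// only to_remote survives. Since an anchor is only materialized for a party that has
	// at least one output to claim, the expected transaction below carries a single
	// anchor alongside to_remote.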

	test_commitment_with_anchors!("30450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf",
		"30450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1",
		"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a04004830450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1014830450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});

	// commitment tx with fee greater than funder amount
	chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
	chan.context.feerate_per_kw = 9651936;
	chan.context.holder_dust_limit_satoshis = 546;
	chan.context.channel_type = cached_channel_type;

	test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
		"304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
		"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});

	// commitment tx with 3 htlc outputs, 2 offered having the same amount and preimage
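	// The two offered HTLCs round to the same satoshi value and share a payment hash, so
	// their commitment outputs are byte-identical; BOLT 3 orders such outputs by CLTV
	// expiry, and the vectors below check that each HTLC signature lands on the right one.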
	chan.context.value_to_self_msat = 7_000_000_000 - 2_000_000;
	chan.context.feerate_per_kw = 253;
	chan.context.pending_inbound_htlcs.clear();
	chan.context.pending_inbound_htlcs.push({
		let mut out = InboundHTLCOutput{
			htlc_id: 1,
			amount_msat: 2000000,
			cltv_expiry: 501,
			payment_hash: PaymentHash([0; 32]),
			state: InboundHTLCState::Committed,
		};
		out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
		out
	});
	chan.context.pending_outbound_htlcs.clear();
	chan.context.pending_outbound_htlcs.push({
		let mut out = OutboundHTLCOutput{
			htlc_id: 6,
			amount_msat: 5000001,
			cltv_expiry: 506,
			payment_hash: PaymentHash([0; 32]),
			state: OutboundHTLCState::Committed,
			source: HTLCSource::dummy(),
			skimmed_fee_msat: None,
			blinding_point: None,
		};
		out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
		out
	});
	chan.context.pending_outbound_htlcs.push({
		let mut out = OutboundHTLCOutput{
			htlc_id: 5,
			amount_msat: 5000000,
			cltv_expiry: 505,
			payment_hash: PaymentHash([0; 32]),
			state: OutboundHTLCState::Committed,
			source: HTLCSource::dummy(),
			skimmed_fee_msat: None,
			blinding_point: None,
		};
		out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
		out
	});

	test_commitment!("304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c8",
		"304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c",
		"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2d8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121b8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121bc0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484a69f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c0147304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

		{ 0,
		"3045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce5",
		"3044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b",
		"020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec000000000000000000011f070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce501473044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
		{ 1,
		"3045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a",
		"3045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d",
		"020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec01000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a01483045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868f9010000" },
		{ 2,
		"30440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc",
		"304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb",
		"020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec02000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc0147304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" }
	} );

	chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
	test_commitment_with_anchors!("3044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c725",
		"3045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b76",
		"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aad9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b7601473044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c72501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

		{ 0,
		"30440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c2",
		"304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424",
		"020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c402000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c28347304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
		{ 1,
		"304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c",
		"304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b",
		"020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40300000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c8347304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568f9010000" },
		{ 2,
		"3045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a1",
		"3045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b",
		"020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40400000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a183483045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568fa010000" }
	} );
}

#[test]
fn test_per_commitment_secret_gen() {
	// Test vectors from BOLT 3 Appendix D:
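	// The assertions below exercise `chan_utils::build_commitment_secret`, which follows
	// BOLT 3's generate_from_seed: walk the 48-bit index from its highest bit down and,
	// for every set bit, flip the matching bit of the running 32-byte value and then
	// SHA256 it. Roughly (a sketch of the BOLT 3 algorithm, not code used by this test):
	//
	//     let mut res: [u8; 32] = *commitment_seed;
	//     for bitpos in (0..48).rev() {
	//         if idx & (1 << bitpos) != 0 {
	//             res[bitpos / 8] ^= 1 << (bitpos & 7);
	//             res = Sha256::hash(&res).to_byte_array();
	//         }
	//     }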
	let mut seed = [0; 32];
	seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap());
	assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
		<Vec<u8>>::from_hex("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap()[..]);

	seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap());
	assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
		<Vec<u8>>::from_hex("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()[..]);

	assert_eq!(chan_utils::build_commitment_secret(&seed, 0xaaaaaaaaaaa),
		<Vec<u8>>::from_hex("56f4008fb007ca9acf0e15b054d5c9fd12ee06cea347914ddbaed70d1c13a528").unwrap()[..]);

	assert_eq!(chan_utils::build_commitment_secret(&seed, 0x555555555555),
		<Vec<u8>>::from_hex("9015daaeb06dba4ccc05b91b2f73bd54405f2be9f217fbacd3c5ac2e62327d31").unwrap()[..]);

	seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap());
	assert_eq!(chan_utils::build_commitment_secret(&seed, 1),
		<Vec<u8>>::from_hex("915c75942a26bb3a433a8ce2cb0427c29ec6c1775cfc78328b57f6ba7bfeaa9c").unwrap()[..]);
}

#[test]
fn test_key_derivation() {
	// Test vectors from BOLT 3 Appendix E:
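	// The derivations checked here are the BOLT 3 formulas:
	//   pubkey  = basepoint + SHA256(per_commitment_point || basepoint) * G
	//   privkey = basepoint_secret + SHA256(per_commitment_point || basepoint)
	//   revocationpubkey = revocation_basepoint * SHA256(revocation_basepoint || per_commitment_point)
	//                      + per_commitment_point * SHA256(per_commitment_point || revocation_basepoint)
	// with the matching secret-side combination for `derive_private_revocation_key`.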
	let secp_ctx = Secp256k1::new();

	let base_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f").unwrap()[..]).unwrap();
	let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();

	let base_point = PublicKey::from_secret_key(&secp_ctx, &base_secret);
	assert_eq!(base_point.serialize()[..], <Vec<u8>>::from_hex("036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2").unwrap()[..]);

	let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
	assert_eq!(per_commitment_point.serialize()[..], <Vec<u8>>::from_hex("025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486").unwrap()[..]);

	assert_eq!(chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret),
		SecretKey::from_slice(&<Vec<u8>>::from_hex("cbced912d3b21bf196a766651e436aff192362621ce317704ea2f75d87e7be0f").unwrap()[..]).unwrap());

	assert_eq!(RevocationKey::from_basepoint(&secp_ctx, &RevocationBasepoint::from(base_point), &per_commitment_point).to_public_key().serialize()[..],
		<Vec<u8>>::from_hex("02916e326636d19c33f13e8c0c3a03dd157f332f3e99c317c141dd865eb01f8ff0").unwrap()[..]);

	assert_eq!(chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_secret, &base_secret),
		SecretKey::from_slice(&<Vec<u8>>::from_hex("d09ffff62ddb2297ab000cc85bcb4283fdeb6aa052affbc9dddcf33b61078110").unwrap()[..]).unwrap());
}

#[test]
fn test_zero_conf_channel_type_support() {
	let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
	let secp_ctx = Secp256k1::new();
	let seed = [42; 32];
	let network = Network::Testnet;
	let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
	let logger = test_utils::TestLogger::new();

	let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
	let config = UserConfig::default();
	let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
		node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

	let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
	channel_type_features.set_zero_conf_required();

	let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
	open_channel_msg.channel_type = Some(channel_type_features);
	let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
	let res = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
		node_b_node_id, &channelmanager::provided_channel_type_features(&config),
		&channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false);
	assert!(res.is_ok());
}

#[test]
fn test_supports_anchors_zero_htlc_tx_fee() {
	// Tests that if both sides support and negotiate `anchors_zero_fee_htlc_tx`, it is the
	// resulting `channel_type`.
	let secp_ctx = Secp256k1::new();
	let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
	let network = Network::Testnet;
	let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
	let logger = test_utils::TestLogger::new();

	let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
	let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

	let mut config = UserConfig::default();
	config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;

	// It is not enough for just the initiator to signal `option_anchors_zero_fee_htlc_tx`; both
	// sides need to signal it.
	let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
		&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
		&channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42,
		&config, 0, 42, None
	).unwrap();
	assert!(!channel_a.context.channel_type.supports_anchors_zero_fee_htlc_tx());

	let mut expected_channel_type = ChannelTypeFeatures::empty();
	expected_channel_type.set_static_remote_key_required();
	expected_channel_type.set_anchors_zero_fee_htlc_tx_required();
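	// Channel type negotiation prefers the most featureful type both peers advertise, so
	// with `negotiate_anchors_zero_fee_htlc_tx` set on both sides the handshake should
	// settle on `static_remote_key + anchors_zero_fee_htlc_tx` rather than plain
	// `static_remote_key`.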
	let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
		&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
		&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
		None
	).unwrap();

	let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
	let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
		&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
		&channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
		&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
	).unwrap();

	assert_eq!(channel_a.context.channel_type, expected_channel_type);
	assert_eq!(channel_b.context.channel_type, expected_channel_type);
}

#[test]
fn test_rejects_implicit_simple_anchors() {
	// Tests that if `option_anchors` is being negotiated implicitly through the intersection of
	// each side's `InitFeatures`, it is rejected.
	let secp_ctx = Secp256k1::new();
	let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
	let network = Network::Testnet;
	let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
	let logger = test_utils::TestLogger::new();

	let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
	let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

	let config = UserConfig::default();

	// See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
	let static_remote_key_required: u64 = 1 << 12;
	let simple_anchors_required: u64 = 1 << 20;
	let raw_init_features = static_remote_key_required | simple_anchors_required;
	let init_features_with_simple_anchors = InitFeatures::from_le_bytes(raw_init_features.to_le_bytes().to_vec());
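	// Per BOLT 9, even feature bits mean "required": bit 12 is `option_static_remotekey`
	// and bit 20 is the legacy `option_anchor_outputs`, so these init features advertise
	// a mandatory simple-anchors channel without naming an explicit `channel_type`.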
	let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
		&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
		&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
		None
	).unwrap();

	// Set `channel_type` to `None` to force the implicit feature negotiation.
	let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
	open_channel_msg.channel_type = None;

	// Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
	// `static_remote_key`, it will fail the channel.
	let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
		&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
		&channelmanager::provided_channel_type_features(&config), &init_features_with_simple_anchors,
		&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
	);
	assert!(channel_b.is_err());
}

#[test]
fn test_rejects_simple_anchors_channel_type() {
	// Tests that if `option_anchors` is being negotiated through the `channel_type` feature,
	// it is rejected.
	let secp_ctx = Secp256k1::new();
	let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
	let network = Network::Testnet;
	let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
	let logger = test_utils::TestLogger::new();

	let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
	let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

	let config = UserConfig::default();

	// See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
	let static_remote_key_required: u64 = 1 << 12;
	let simple_anchors_required: u64 = 1 << 20;
	let simple_anchors_raw_features = static_remote_key_required | simple_anchors_required;
	let simple_anchors_init = InitFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
	let simple_anchors_channel_type = ChannelTypeFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
	assert!(!simple_anchors_init.requires_unknown_bits());
	assert!(!simple_anchors_channel_type.requires_unknown_bits());

	// First, we'll try to open a channel between A and B where A requests a channel type for
	// the original `option_anchors` feature (non zero fee htlc tx). This should be rejected by
	// B as it's not supported by LDK.
	let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
		&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
		&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
		None
	).unwrap();

	let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
	open_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());

	let res = InboundV1Channel::<&TestKeysInterface>::new(
		&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
		&channelmanager::provided_channel_type_features(&config), &simple_anchors_init,
		&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
	);
	assert!(res.is_err());

	// Then, we'll try to open another channel where A requests a channel type for
	// `anchors_zero_fee_htlc_tx`. B is malicious and tries to downgrade the channel type to the
	// original `option_anchors` feature, which should be rejected by A as it's not supported by
	// LDK.
	let mut channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
		&fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init,
		10000000, 100000, 42, &config, 0, 42, None
	).unwrap();

	let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));

	let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
		&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
		&channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
		&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
	).unwrap();

	let mut accept_channel_msg = channel_b.get_accept_channel_message();
	accept_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());

	let res = channel_a.accept_channel(
		&accept_channel_msg, &config.channel_handshake_limits, &simple_anchors_init
	);
	assert!(res.is_err());
}

#[test]
fn test_waiting_for_batch() {
	let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
	let logger = test_utils::TestLogger::new();
	let secp_ctx = Secp256k1::new();
	let seed = [42; 32];
	let network = Network::Testnet;
	let best_block = BestBlock::from_network(network);
	let chain_hash = ChainHash::using_genesis_block(network);
	let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

	let mut config = UserConfig::default();
	// Set trust_own_funding_0conf while ensuring we don't send channel_ready for a
	// channel in a batch before all channels are ready.
	config.channel_handshake_limits.trust_own_funding_0conf = true;

	// Create a channel from node a to node b that will be part of batch funding.
	let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
	let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(
		&feeest,
		&&keys_provider,
		&&keys_provider,
		node_b_node_id,
		&channelmanager::provided_init_features(&config),
		10000000,
		100000,
		42,
		&config,
		0,
		42,
		None
	).unwrap();

	let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
	let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
	let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(
		&feeest,
		&&keys_provider,
		&&keys_provider,
		node_b_node_id,
		&channelmanager::provided_channel_type_features(&config),
		&channelmanager::provided_init_features(&config),
		&open_channel_msg,
		7,
		&config,
		0,
		&&logger,
		true,  // Allow node b to send a 0conf channel_ready.
	).unwrap();

	let accept_channel_msg = node_b_chan.accept_inbound_channel();
	node_a_chan.accept_channel(
		&accept_channel_msg,
		&config.channel_handshake_limits,
		&channelmanager::provided_init_features(&config),
	).unwrap();

	// Fund the channel with a batch funding transaction.
	let output_script = node_a_chan.context.get_funding_redeemscript();
	let tx = Transaction {
		version: 2,
		lock_time: LockTime::ZERO,
		input: Vec::new(),
		output: vec![
			TxOut {
				value: 10000000, script_pubkey: output_script.clone(),
			},
			TxOut {
				value: 10000000, script_pubkey: Builder::new().into_script(),
			},
		],
	};
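	// Only the first output belongs to this channel; the second stands in for another
	// channel funded by the same transaction, which is what makes this a batch funding
	// transaction (and why `get_funding_created` is called with `is_batch_funding` true).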
	let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
	let funding_created_msg = node_a_chan.get_funding_created(
		tx.clone(), funding_outpoint, true, &&logger,
	).map_err(|_| ()).unwrap();
	let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(
		&funding_created_msg.unwrap(),
		best_block,
		&&keys_provider,
		&&logger,
	).map_err(|_| ()).unwrap();
	let node_b_updates = node_b_chan.monitor_updating_restored(
		&&logger,
		&&keys_provider,
		chain_hash,
		&config,
		0,
	);

	// Receive funding_signed, but the channel will be configured to hold sending channel_ready and
	// broadcasting the funding transaction until the batch is ready.
	let res = node_a_chan.funding_signed(
		&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger,
	);
	let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
	let node_a_updates = node_a_chan.monitor_updating_restored(
		&&logger,
		&&keys_provider,
		chain_hash,
		&config,
		0,
	);
	// Our channel_ready shouldn't be sent yet, even with trust_own_funding_0conf set,
	// as the funding transaction depends on all channels in the batch becoming ready.
	assert!(node_a_updates.channel_ready.is_none());
	assert!(node_a_updates.funding_broadcastable.is_none());
	assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));

	// It is possible to receive a 0conf channel_ready from the remote node.
	node_a_chan.channel_ready(
		&node_b_updates.channel_ready.unwrap(),
		&&keys_provider,
		chain_hash,
		&config,
		&best_block,
		&&logger,
	).unwrap();
	assert_eq!(
		node_a_chan.context.channel_state,
		ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH | AwaitingChannelReadyFlags::THEIR_CHANNEL_READY)
	);

	// The WAITING_FOR_BATCH flag is cleared only when the ChannelManager calls set_batch_ready.
	node_a_chan.set_batch_ready();
	assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY));
	assert!(node_a_chan.check_get_channel_ready(0).is_some());
}