1 // This file is Copyright its original authors, visible in version control
4 // This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
5 // or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
6 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
7 // You may not use this file except in accordance with one or both of these
10 use bitcoin::blockdata::constants::ChainHash;
11 use bitcoin::blockdata::script::{Script, ScriptBuf, Builder};
12 use bitcoin::blockdata::transaction::Transaction;
14 use bitcoin::sighash::EcdsaSighashType;
15 use bitcoin::consensus::encode;
17 use bitcoin::hashes::Hash;
18 use bitcoin::hashes::sha256::Hash as Sha256;
19 use bitcoin::hashes::sha256d::Hash as Sha256d;
20 use bitcoin::hash_types::{Txid, BlockHash};
22 use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
23 use bitcoin::secp256k1::{PublicKey,SecretKey};
24 use bitcoin::secp256k1::{Secp256k1,ecdsa::Signature};
25 use bitcoin::secp256k1;
27 use crate::ln::{ChannelId, PaymentPreimage, PaymentHash};
28 use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
30 use crate::ln::msgs::DecodeError;
31 use crate::ln::script::{self, ShutdownScript};
32 use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT, ChannelShutdownState};
33 use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
34 use crate::ln::chan_utils;
35 use crate::ln::onion_utils::HTLCFailReason;
36 use crate::chain::BestBlock;
37 use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
38 use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
39 use crate::chain::transaction::{OutPoint, TransactionData};
40 use crate::sign::ecdsa::{EcdsaChannelSigner, WriteableEcdsaChannelSigner};
41 use crate::sign::{EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
42 use crate::events::ClosureReason;
43 use crate::routing::gossip::NodeId;
44 use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
45 use crate::util::logger::{Logger, Record, WithContext};
46 use crate::util::errors::APIError;
47 use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure};
48 use crate::util::scid_utils::scid_from_parts;
51 use crate::prelude::*;
52 use core::{cmp,mem,fmt};
53 use core::convert::TryInto;
55 #[cfg(any(test, fuzzing, debug_assertions))]
56 use crate::sync::Mutex;
57 use crate::sign::type_resolver::ChannelSignerType;
59 use super::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint, RevocationBasepoint};
/// A snapshot of balance- and HTLC-related statistics for a channel; all
/// `_msat` fields are denominated in millisatoshis.
62 pub struct ChannelValueStat {
63 pub value_to_self_msat: u64,
64 pub channel_value_msat: u64,
65 pub channel_reserve_msat: u64,
66 pub pending_outbound_htlcs_amount_msat: u64,
67 pub pending_inbound_htlcs_amount_msat: u64,
68 pub holding_cell_outbound_amount_msat: u64,
69 pub counterparty_max_htlc_value_in_flight_msat: u64, // outgoing
70 pub counterparty_dust_limit_msat: u64,
/// The balances and next-HTLC limits of a channel, in millisatoshis.
73 pub struct AvailableBalances {
74 /// The amount that would go to us if we close the channel, ignoring any on-chain fees.
75 pub balance_msat: u64,
76 /// Total amount available for our counterparty to send to us.
77 pub inbound_capacity_msat: u64,
78 /// Total amount available for us to send to our counterparty.
79 pub outbound_capacity_msat: u64,
80 /// The maximum value we can assign to the next outbound HTLC
81 pub next_outbound_htlc_limit_msat: u64,
82 /// The minimum value we can assign to the next outbound HTLC
83 pub next_outbound_htlc_minimum_msat: u64,
86 #[derive(Debug, Clone, Copy, PartialEq)]
88 // Inbound states mirroring InboundHTLCState
90 AwaitingRemoteRevokeToAnnounce,
91 // Note that we do not have a AwaitingAnnouncedRemoteRevoke variant here as it is universally
92 // handled the same as `Committed`, with the only exception in `InboundHTLCState` being the
93 // distinction of when we allow ourselves to forward the HTLC. Because we aren't "forwarding"
94 // the fee update anywhere, we can simply consider the fee update `Committed` immediately
95 // instead of setting it to AwaitingAnnouncedRemoteRevoke.
97 // Outbound state can only be `LocalAnnounced` or `Committed`
/// Why an inbound HTLC is being removed from our state (used by
/// `InboundHTLCState::LocalRemoved`).
101 enum InboundHTLCRemovalReason {
/// Failed, carrying an encrypted onion error packet.
102 FailRelay(msgs::OnionErrorPacket),
/// Failed as malformed; carries a 32-byte hash (presumably the sha256 of the
/// onion, cf. the `sha256_of_onion` field elsewhere in this file) and a u16
/// failure code — TODO(review): confirm field meanings against the full file.
103 FailMalformed(([u8; 32], u16)),
/// Fulfilled with the given payment preimage.
104 Fulfill(PaymentPreimage),
/// Tracks an inbound HTLC's progress through the `commitment_signed` /
/// `revoke_and_ack` state machine; see the per-variant docs below for the
/// exact protocol flow each state corresponds to.
107 enum InboundHTLCState {
108 /// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an
109 /// update_add_htlc message for this HTLC.
110 RemoteAnnounced(PendingHTLCStatus),
111 /// Included in a received commitment_signed message (implying we've
112 /// revoke_and_ack'd it), but the remote hasn't yet revoked their previous
113 /// state (see the example below). We have not yet included this HTLC in a
114 /// commitment_signed message because we are waiting on the remote's
115 /// aforementioned state revocation. One reason this missing remote RAA
116 /// (revoke_and_ack) blocks us from constructing a commitment_signed message
117 /// is because every time we create a new "state", i.e. every time we sign a
118 /// new commitment tx (see [BOLT #2]), we need a new per_commitment_point,
119 /// which are provided one-at-a-time in each RAA. E.g., the last RAA they
120 /// sent provided the per_commitment_point for our current commitment tx.
121 /// The other reason we should not send a commitment_signed without their RAA
122 /// is because their RAA serves to ACK our previous commitment_signed.
124 /// Here's an example of how an HTLC could come to be in this state:
125 /// remote --> update_add_htlc(prev_htlc) --> local
126 /// remote --> commitment_signed(prev_htlc) --> local
127 /// remote <-- revoke_and_ack <-- local
128 /// remote <-- commitment_signed(prev_htlc) <-- local
129 /// [note that here, the remote does not respond with a RAA]
130 /// remote --> update_add_htlc(this_htlc) --> local
131 /// remote --> commitment_signed(prev_htlc, this_htlc) --> local
132 /// Now `this_htlc` will be assigned this state. It's unable to be officially
133 /// accepted, i.e. included in a commitment_signed, because we're missing the
134 /// RAA that provides our next per_commitment_point. The per_commitment_point
135 /// is used to derive commitment keys, which are used to construct the
136 /// signatures in a commitment_signed message.
137 /// Implies AwaitingRemoteRevoke.
139 /// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
140 AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus),
141 /// Included in a received commitment_signed message (implying we've revoke_and_ack'd it).
142 /// We have also included this HTLC in our latest commitment_signed and are now just waiting
143 /// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the
144 /// channel (before it can then get forwarded and/or removed).
145 /// Implies AwaitingRemoteRevoke.
146 AwaitingAnnouncedRemoteRevoke(PendingHTLCStatus),
148 /// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
149 /// created it we would have put it in the holding cell instead). When they next revoke_and_ack
151 /// Note that we have to keep an eye on the HTLC until we've received a broadcastable
152 /// commitment transaction without it as otherwise we'll have to force-close the channel to
153 /// claim it before the timeout (obviously doesn't apply to revoked HTLCs that we can't claim
154 /// anyway). That said, ChannelMonitor does this for us (see
155 /// ChannelMonitor::should_broadcast_holder_commitment_txn) so we actually remove the HTLC from
156 /// our own local state before then, once we're sure that the next commitment_signed and
157 /// ChannelMonitor::provide_latest_local_commitment_tx will not include this HTLC.
158 LocalRemoved(InboundHTLCRemovalReason),
/// An inbound HTLC tracked in our channel state, together with its current
/// position in the inbound state machine.
161 struct InboundHTLCOutput {
165 payment_hash: PaymentHash,
166 state: InboundHTLCState,
169 #[cfg_attr(test, derive(Clone, Debug, PartialEq))]
/// Tracks an outbound HTLC's progress through the `commitment_signed` /
/// `revoke_and_ack` state machine; see the per-variant docs below.
170 enum OutboundHTLCState {
171 /// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
172 /// created it we would have put it in the holding cell instead). When they next revoke_and_ack
173 /// we will promote to Committed (note that they may not accept it until the next time we
174 /// revoke, but we don't really care about that:
175 /// * they've revoked, so worst case we can announce an old state and get our (option on)
176 /// money back (though we won't), and,
177 /// * we'll send them a revoke when they send a commitment_signed, and since only they're
178 /// allowed to remove it, the "can only be removed once committed on both sides" requirement
179 /// doesn't matter to us and it's up to them to enforce it, worst-case they jump ahead but
180 /// we'll never get out of sync).
181 /// Note that we Box the OnionPacket as it's rather large and we don't want to blow up
182 /// OutboundHTLCOutput's size just for a temporary bit
183 LocalAnnounced(Box<msgs::OnionPacket>),
185 /// Remote removed this (outbound) HTLC. We're waiting on their commitment_signed to finalize
186 /// the change (though they'll need to revoke before we fail the payment).
187 RemoteRemoved(OutboundHTLCOutcome),
188 /// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
189 /// the remote side hasn't yet revoked their previous state, which we need them to do before we
190 /// can do any backwards failing. Implies AwaitingRemoteRevoke.
191 /// We also have not yet removed this HTLC in a commitment_signed message, and are waiting on a
192 /// remote revoke_and_ack on a previous state before we can do so.
193 AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome),
194 /// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
195 /// the remote side hasn't yet revoked their previous state, which we need them to do before we
196 /// can do any backwards failing. Implies AwaitingRemoteRevoke.
197 /// We have removed this HTLC in our latest commitment_signed and are now just waiting on a
198 /// revoke_and_ack to drop completely.
199 AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome),
203 #[cfg_attr(test, derive(Debug, PartialEq))]
/// The resolution of an outbound HTLC removed by the remote: either a success
/// (optionally carrying the payment preimage) or a failure with its reason.
204 enum OutboundHTLCOutcome {
205 /// LDK version 0.0.105+ will always fill in the preimage here.
206 Success(Option<PaymentPreimage>),
207 Failure(HTLCFailReason),
// `None` (no failure reason) maps to a success without a preimage; `Some(r)`
// maps to a failure carrying `r`.
210 impl From<Option<HTLCFailReason>> for OutboundHTLCOutcome {
211 fn from(o: Option<HTLCFailReason>) -> Self {
213 None => OutboundHTLCOutcome::Success(None),
214 Some(r) => OutboundHTLCOutcome::Failure(r)
// Borrows the failure reason out of an outcome: `Some(&reason)` for failures,
// `None` for successes.
219 impl<'a> Into<Option<&'a HTLCFailReason>> for &'a OutboundHTLCOutcome {
220 fn into(self) -> Option<&'a HTLCFailReason> {
222 OutboundHTLCOutcome::Success(_) => None,
223 OutboundHTLCOutcome::Failure(ref r) => Some(r)
228 #[cfg_attr(test, derive(Clone, Debug, PartialEq))]
/// An outbound HTLC tracked in our channel state, together with its current
/// position in the outbound state machine.
229 struct OutboundHTLCOutput {
233 payment_hash: PaymentHash,
234 state: OutboundHTLCState,
// Set only for blinded-path HTLCs (per the `Option` type); the exact semantics
// of these two fields are not visible in this excerpt — confirm at use sites.
236 blinding_point: Option<PublicKey>,
237 skimmed_fee_msat: Option<u64>,
240 /// See AwaitingRemoteRevoke ChannelState for more info
241 #[cfg_attr(test, derive(Clone, Debug, PartialEq))]
/// An HTLC update queued in the holding cell until we are able to send a new
/// `commitment_signed` (i.e. while `AWAITING_REMOTE_REVOKE` is set).
242 enum HTLCUpdateAwaitingACK {
243 AddHTLC { // TODO: Time out if we're getting close to cltv_expiry
247 payment_hash: PaymentHash,
249 onion_routing_packet: msgs::OnionPacket,
250 // The extra fee we're skimming off the top of this HTLC.
251 skimmed_fee_msat: Option<u64>,
252 blinding_point: Option<PublicKey>,
255 payment_preimage: PaymentPreimage,
260 err_packet: msgs::OnionErrorPacket,
265 sha256_of_onion: [u8; 32],
// Generates a `u32`-backed bitflag newtype for a set of [`ChannelState`] flags,
// including bitwise operator impls and `is_set`/`is_empty`/`from_u32` helpers.
// The `FUNDED_STATE` form additionally generates interop impls against
// `FundedStateFlags`.
269 macro_rules! define_state_flags {
270 ($flag_type_doc: expr, $flag_type: ident, [$(($flag_doc: expr, $flag: ident, $value: expr)),+], $extra_flags: expr) => {
271 #[doc = $flag_type_doc]
272 #[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
273 struct $flag_type(u32);
278 const $flag: $flag_type = $flag_type($value);
281 /// All flags that apply to the specified [`ChannelState`] variant.
283 const ALL: $flag_type = Self($(Self::$flag.0 | )* $extra_flags);
286 fn new() -> Self { Self(0) }
289 fn from_u32(flags: u32) -> Result<Self, ()> {
290 if flags & !Self::ALL.0 != 0 {
293 Ok($flag_type(flags))
298 fn is_empty(&self) -> bool { self.0 == 0 }
301 fn is_set(&self, flag: Self) -> bool { *self & flag == flag }
304 impl core::ops::Not for $flag_type {
306 fn not(self) -> Self::Output { Self(!self.0) }
308 impl core::ops::BitOr for $flag_type {
310 fn bitor(self, rhs: Self) -> Self::Output { Self(self.0 | rhs.0) }
312 impl core::ops::BitOrAssign for $flag_type {
313 fn bitor_assign(&mut self, rhs: Self) { self.0 |= rhs.0; }
315 impl core::ops::BitAnd for $flag_type {
317 fn bitand(self, rhs: Self) -> Self::Output { Self(self.0 & rhs.0) }
319 impl core::ops::BitAndAssign for $flag_type {
320 fn bitand_assign(&mut self, rhs: Self) { self.0 &= rhs.0; }
323 ($flag_type_doc: expr, $flag_type: ident, $flags: tt) => {
324 define_state_flags!($flag_type_doc, $flag_type, $flags, 0);
326 ($flag_type_doc: expr, FUNDED_STATE, $flag_type: ident, $flags: tt) => {
327 define_state_flags!($flag_type_doc, $flag_type, $flags, FundedStateFlags::ALL.0);
328 impl core::ops::BitOr<FundedStateFlags> for $flag_type {
330 fn bitor(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 | rhs.0) }
332 impl core::ops::BitOrAssign<FundedStateFlags> for $flag_type {
333 fn bitor_assign(&mut self, rhs: FundedStateFlags) { self.0 |= rhs.0; }
335 impl core::ops::BitAnd<FundedStateFlags> for $flag_type {
337 fn bitand(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 & rhs.0) }
339 impl core::ops::BitAndAssign<FundedStateFlags> for $flag_type {
340 fn bitand_assign(&mut self, rhs: FundedStateFlags) { self.0 &= rhs.0; }
342 impl PartialEq<FundedStateFlags> for $flag_type {
343 fn eq(&self, other: &FundedStateFlags) -> bool { self.0 == other.0 }
345 impl From<FundedStateFlags> for $flag_type {
346 fn from(flags: FundedStateFlags) -> Self { Self(flags.0) }
351 /// We declare all the states/flags here together to help determine which bits are still available
354 pub const OUR_INIT_SENT: u32 = 1 << 0;
355 pub const THEIR_INIT_SENT: u32 = 1 << 1;
356 pub const FUNDING_NEGOTIATED: u32 = 1 << 2;
357 pub const AWAITING_CHANNEL_READY: u32 = 1 << 3;
358 pub const THEIR_CHANNEL_READY: u32 = 1 << 4;
359 pub const OUR_CHANNEL_READY: u32 = 1 << 5;
360 pub const CHANNEL_READY: u32 = 1 << 6;
361 pub const PEER_DISCONNECTED: u32 = 1 << 7;
362 pub const MONITOR_UPDATE_IN_PROGRESS: u32 = 1 << 8;
363 pub const AWAITING_REMOTE_REVOKE: u32 = 1 << 9;
364 pub const REMOTE_SHUTDOWN_SENT: u32 = 1 << 10;
365 pub const LOCAL_SHUTDOWN_SENT: u32 = 1 << 11;
366 pub const SHUTDOWN_COMPLETE: u32 = 1 << 12;
367 pub const WAITING_FOR_BATCH: u32 = 1 << 13;
371 "Flags that apply to all [`ChannelState`] variants in which the channel is funded.",
373 ("Indicates the remote side is considered \"disconnected\" and no updates are allowed \
374 until after we've done a `channel_reestablish` dance.", PEER_DISCONNECTED, state_flags::PEER_DISCONNECTED),
375 ("Indicates the user has told us a `ChannelMonitor` update is pending async persistence \
376 somewhere and we should pause sending any outbound messages until they've managed to \
377 complete it.", MONITOR_UPDATE_IN_PROGRESS, state_flags::MONITOR_UPDATE_IN_PROGRESS),
378 ("Indicates we received a `shutdown` message from the remote end. If set, they may not add \
379 any new HTLCs to the channel, and we are expected to respond with our own `shutdown` \
380 message when possible.", REMOTE_SHUTDOWN_SENT, state_flags::REMOTE_SHUTDOWN_SENT),
381 ("Indicates we sent a `shutdown` message. At this point, we may not add any new HTLCs to \
382 the channel.", LOCAL_SHUTDOWN_SENT, state_flags::LOCAL_SHUTDOWN_SENT)
387 "Flags that only apply to [`ChannelState::NegotiatingFunding`].",
388 NegotiatingFundingFlags, [
389 ("Indicates we have (or are prepared to) send our `open_channel`/`accept_channel` message.",
390 OUR_INIT_SENT, state_flags::OUR_INIT_SENT),
391 ("Indicates we have received their `open_channel`/`accept_channel` message.",
392 THEIR_INIT_SENT, state_flags::THEIR_INIT_SENT)
397 "Flags that only apply to [`ChannelState::AwaitingChannelReady`].",
398 FUNDED_STATE, AwaitingChannelReadyFlags, [
399 ("Indicates they sent us a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
400 `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
401 THEIR_CHANNEL_READY, state_flags::THEIR_CHANNEL_READY),
402 ("Indicates we sent them a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
403 `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
404 OUR_CHANNEL_READY, state_flags::OUR_CHANNEL_READY),
405 ("Indicates the channel was funded in a batch and the broadcast of the funding transaction \
406 is being held until all channels in the batch have received `funding_signed` and have \
407 their monitors persisted.", WAITING_FOR_BATCH, state_flags::WAITING_FOR_BATCH)
412 "Flags that only apply to [`ChannelState::ChannelReady`].",
413 FUNDED_STATE, ChannelReadyFlags, [
414 ("Indicates that we have sent a `commitment_signed` but are awaiting the responding \
415 `revoke_and_ack` message. During this period, we can't generate new `commitment_signed` \
416 messages as we'd be unable to determine which HTLCs they included in their `revoke_and_ack` \
417 implicit ACK, so instead we have to hold them away temporarily to be sent later.",
418 AWAITING_REMOTE_REVOKE, state_flags::AWAITING_REMOTE_REVOKE)
422 #[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
424 /// We are negotiating the parameters required for the channel prior to funding it.
425 NegotiatingFunding(NegotiatingFundingFlags),
426 /// We have sent `funding_created` and are awaiting a `funding_signed` to advance to
427 /// `AwaitingChannelReady`. Note that this is nonsense for an inbound channel as we immediately generate
428 /// `funding_signed` upon receipt of `funding_created`, so simply skip this state.
430 /// We've received/sent `funding_created` and `funding_signed` and are thus now waiting on the
431 /// funding transaction to confirm.
432 AwaitingChannelReady(AwaitingChannelReadyFlags),
433 /// Both we and our counterparty consider the funding transaction confirmed and the channel is
435 ChannelReady(ChannelReadyFlags),
436 /// We've successfully negotiated a `closing_signed` dance. At this point, the `ChannelManager`
437 /// is about to drop us, but we store this anyway.
441 macro_rules! impl_state_flag {
442 ($get: ident, $set: ident, $clear: ident, $state_flag: expr, [$($state: ident),+]) => {
444 fn $get(&self) -> bool {
447 ChannelState::$state(flags) => flags.is_set($state_flag.into()),
456 ChannelState::$state(flags) => *flags |= $state_flag,
458 _ => debug_assert!(false, "Attempted to set flag on unexpected ChannelState"),
462 fn $clear(&mut self) {
465 ChannelState::$state(flags) => *flags &= !($state_flag),
467 _ => debug_assert!(false, "Attempted to clear flag on unexpected ChannelState"),
471 ($get: ident, $set: ident, $clear: ident, $state_flag: expr, FUNDED_STATES) => {
472 impl_state_flag!($get, $set, $clear, $state_flag, [AwaitingChannelReady, ChannelReady]);
474 ($get: ident, $set: ident, $clear: ident, $state_flag: expr, $state: ident) => {
475 impl_state_flag!($get, $set, $clear, $state_flag, [$state]);
480 fn from_u32(state: u32) -> Result<Self, ()> {
482 state_flags::FUNDING_NEGOTIATED => Ok(ChannelState::FundingNegotiated),
483 state_flags::SHUTDOWN_COMPLETE => Ok(ChannelState::ShutdownComplete),
485 if val & state_flags::AWAITING_CHANNEL_READY == state_flags::AWAITING_CHANNEL_READY {
486 AwaitingChannelReadyFlags::from_u32(val & !state_flags::AWAITING_CHANNEL_READY)
487 .map(|flags| ChannelState::AwaitingChannelReady(flags))
488 } else if val & state_flags::CHANNEL_READY == state_flags::CHANNEL_READY {
489 ChannelReadyFlags::from_u32(val & !state_flags::CHANNEL_READY)
490 .map(|flags| ChannelState::ChannelReady(flags))
491 } else if let Ok(flags) = NegotiatingFundingFlags::from_u32(val) {
492 Ok(ChannelState::NegotiatingFunding(flags))
500 fn to_u32(&self) -> u32 {
502 ChannelState::NegotiatingFunding(flags) => flags.0,
503 ChannelState::FundingNegotiated => state_flags::FUNDING_NEGOTIATED,
504 ChannelState::AwaitingChannelReady(flags) => state_flags::AWAITING_CHANNEL_READY | flags.0,
505 ChannelState::ChannelReady(flags) => state_flags::CHANNEL_READY | flags.0,
506 ChannelState::ShutdownComplete => state_flags::SHUTDOWN_COMPLETE,
510 fn is_pre_funded_state(&self) -> bool {
511 matches!(self, ChannelState::NegotiatingFunding(_)|ChannelState::FundingNegotiated)
514 fn is_both_sides_shutdown(&self) -> bool {
515 self.is_local_shutdown_sent() && self.is_remote_shutdown_sent()
518 fn with_funded_state_flags_mask(&self) -> FundedStateFlags {
520 ChannelState::AwaitingChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
521 ChannelState::ChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
522 _ => FundedStateFlags::new(),
526 fn should_force_holding_cell(&self) -> bool {
528 ChannelState::ChannelReady(flags) =>
529 flags.is_set(ChannelReadyFlags::AWAITING_REMOTE_REVOKE) ||
530 flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into()) ||
531 flags.is_set(FundedStateFlags::PEER_DISCONNECTED.into()),
533 debug_assert!(false, "The holding cell is only valid within ChannelReady");
// Generated get/set/clear accessor triplets for each individual state flag;
// the final argument restricts which ChannelState variants carry the flag.
539 impl_state_flag!(is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected,
540 FundedStateFlags::PEER_DISCONNECTED, FUNDED_STATES);
541 impl_state_flag!(is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress,
542 FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS, FUNDED_STATES);
543 impl_state_flag!(is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent,
544 FundedStateFlags::LOCAL_SHUTDOWN_SENT, FUNDED_STATES);
545 impl_state_flag!(is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent,
546 FundedStateFlags::REMOTE_SHUTDOWN_SENT, FUNDED_STATES);
547 impl_state_flag!(is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready,
548 AwaitingChannelReadyFlags::OUR_CHANNEL_READY, AwaitingChannelReady);
549 impl_state_flag!(is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready,
550 AwaitingChannelReadyFlags::THEIR_CHANNEL_READY, AwaitingChannelReady);
551 impl_state_flag!(is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch,
552 AwaitingChannelReadyFlags::WAITING_FOR_BATCH, AwaitingChannelReady);
553 impl_state_flag!(is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke,
554 ChannelReadyFlags::AWAITING_REMOTE_REVOKE, ChannelReady);
/// The first commitment transaction number: `(1 << 48) - 1`, the largest
/// 48-bit value.
557 pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
/// Default cap of 50 HTLCs — presumably the default `max_accepted_htlcs`-style
/// limit; confirm at use sites.
559 pub const DEFAULT_MAX_HTLCS: u16 = 50;
/// Returns the weight of a commitment transaction with no HTLC outputs:
/// 1124 WU when the channel type uses zero-fee-HTLC anchors, 724 WU otherwise.
561 pub(crate) fn commitment_tx_base_weight(channel_type_features: &ChannelTypeFeatures) -> u64 {
562 const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
563 const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124;
564 if channel_type_features.supports_anchors_zero_fee_htlc_tx() { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT }
// Additional commitment-transaction weight contributed by each non-dust HTLC output.
568 const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
// NOTE(review): duplicate declaration of COMMITMENT_TX_WEIGHT_PER_HTLC with
// different visibility — presumably the two are gated by `#[cfg]` attributes
// not visible in this excerpt; confirm against the full file.
570 pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
/// The value, in satoshis, of each anchor output on an anchors commitment
/// transaction.
572 pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;
574 /// The percentage of the channel value `holder_max_htlc_value_in_flight_msat` used to be set to,
575 /// before this was made configurable. The percentage was made configurable in LDK 0.0.107,
576 /// although LDK 0.0.104+ enabled serialization of channels with a different value set for
577 /// `holder_max_htlc_value_in_flight_msat`.
578 pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10;
580 /// Maximum `funding_satoshis` value according to the BOLT #2 specification, if
581 /// `option_support_large_channel` (aka wumbo channels) is not supported.
583 pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;
585 /// Total bitcoin supply in satoshis.
586 pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 1_0000_0000;
588 /// The maximum network dust limit for standard script formats. This currently represents the
589 /// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire
590 /// transaction non-standard and thus refuses to relay it.
591 /// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many
592 /// implementations use this value for their dust limit today.
593 pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546;
595 /// The maximum channel dust limit we will accept from our counterparty.
596 pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS;
598 /// The dust limit is used for both the commitment transaction outputs as well as the closing
599 /// transactions. For cooperative closing transactions, we require segwit outputs, though accept
600 /// *any* segwit scripts, which are allowed to be up to 42 bytes in length.
601 /// In order to avoid having to concern ourselves with standardness during the closing process, we
602 /// simply require our counterparty to use a dust limit which will leave any segwit output
604 /// See <https://github.com/lightning/bolts/issues/905> for more details.
605 pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;
607 // Just a reasonable implementation-specific safe lower bound, higher than the dust limit.
608 pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;
610 /// Used to return a simple Error back to ChannelManager. Will get converted to a
611 /// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
612 /// channel_id in ChannelManager.
613 pub(super) enum ChannelError {
619 impl fmt::Debug for ChannelError {
620 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// Debug output prefixes the message with the variant name ("Ignore : ", etc.),
// unlike Display which prints only the message.
622 &ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
623 &ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
624 &ChannelError::Close(ref e) => write!(f, "Close : {}", e),
629 impl fmt::Display for ChannelError {
630 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// Display prints only the inner message text, with no variant-name prefix.
632 &ChannelError::Ignore(ref e) => write!(f, "{}", e),
633 &ChannelError::Warn(ref e) => write!(f, "{}", e),
634 &ChannelError::Close(ref e) => write!(f, "{}", e),
/// A [`Logger`] wrapper that stamps each log record with a channel's peer id
/// and channel id before forwarding it to the inner logger.
639 pub(super) struct WithChannelContext<'a, L: Deref> where L::Target: Logger {
641 pub peer_id: Option<PublicKey>,
642 pub channel_id: Option<ChannelId>,
645 impl<'a, L: Deref> Logger for WithChannelContext<'a, L> where L::Target: Logger {
646 fn log(&self, mut record: Record) {
// Attach our channel context to the record, then delegate to the inner logger.
647 record.peer_id = self.peer_id;
648 record.channel_id = self.channel_id;
649 self.logger.log(record)
653 impl<'a, 'b, L: Deref> WithChannelContext<'a, L>
654 where L::Target: Logger {
655 pub(super) fn from<S: Deref>(logger: &'a L, context: &'b ChannelContext<S>) -> Self
656 where S::Target: SignerProvider
660 peer_id: Some(context.counterparty_node_id),
661 channel_id: Some(context.channel_id),
// Unwraps a secp256k1 `Result`, converting any error into an early return of
// `ChannelError::Close` carrying the provided message.
666 macro_rules! secp_check {
667 ($res: expr, $err: expr) => {
670 Err(_) => return Err(ChannelError::Close($err)),
675 /// The "channel disabled" bit in channel_update must be set based on whether we are connected to
676 /// our counterparty or not. However, we don't want to announce updates right away to avoid
677 /// spamming the network with updates if the connection is flapping. Instead, we "stage" updates to
678 /// our channel_update message and track the current state here.
679 /// See implementation at [`super::channelmanager::ChannelManager::timer_tick_occurred`].
680 #[derive(Clone, Copy, PartialEq)]
681 pub(super) enum ChannelUpdateStatus {
682 /// We've announced the channel as enabled and are connected to our peer.
684 /// Our channel is no longer live, but we haven't announced the channel as disabled yet.
686 /// Our channel is live again, but we haven't announced the channel as enabled yet.
688 /// We've announced the channel as disabled.
692 /// We track when we sent an `AnnouncementSignatures` to our peer in a few states, described here.
694 pub enum AnnouncementSigsState {
695 /// We have not sent our peer an `AnnouncementSignatures` yet, or our peer disconnected since
696 /// we sent the last `AnnouncementSignatures`.
698 /// We sent an `AnnouncementSignatures` to our peer since the last time our peer disconnected.
699 /// This state never appears on disk - instead we write `NotSent`.
701 /// We sent a `CommitmentSigned` after the last `AnnouncementSignatures` we sent. Because we
702 /// only ever have a single `CommitmentSigned` pending at once, if we sent one after sending
703 /// `AnnouncementSignatures` then we know the peer received our `AnnouncementSignatures` if
704 /// they send back a `RevokeAndACK`.
705 /// This state never appears on disk - instead we write `NotSent`.
707 /// We received a `RevokeAndACK`, effectively ack-ing our `AnnouncementSignatures`, at this
708 /// point we no longer need to re-send our `AnnouncementSignatures` again on reconnect.
712 /// An enum indicating whether the local or remote side offered a given HTLC.
718 /// An enum gathering stats on pending HTLCs, either inbound or outbound side.
721 pending_htlcs_value_msat: u64,
722 on_counterparty_tx_dust_exposure_msat: u64,
723 on_holder_tx_dust_exposure_msat: u64,
724 holding_cell_msat: u64,
725 on_holder_tx_holding_cell_htlcs_count: u32, // dust HTLCs *non*-included
728 /// An enum gathering stats on commitment transaction, either local or remote.
729 struct CommitmentStats<'a> {
730 tx: CommitmentTransaction, // the transaction info
731 feerate_per_kw: u32, // the feerate included to build the transaction
732 total_fee_sat: u64, // the total fee included in the transaction
733 num_nondust_htlcs: usize, // the number of HTLC outputs (dust HTLCs *non*-included)
734 htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction
735 local_balance_msat: u64, // local balance before fees *not* considering dust limits
736 remote_balance_msat: u64, // remote balance before fees *not* considering dust limits
737 outbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
738 inbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful received HTLCs since last commitment
741 /// Used when calculating whether we or the remote can afford an additional HTLC.
742 struct HTLCCandidate {
744 origin: HTLCInitiator,
748 fn new(amount_msat: u64, origin: HTLCInitiator) -> Self {
756 /// A return value enum for get_update_fulfill_htlc. See UpdateFulfillCommitFetch variants for
758 enum UpdateFulfillFetch {
760 monitor_update: ChannelMonitorUpdate,
761 htlc_value_msat: u64,
762 msg: Option<msgs::UpdateFulfillHTLC>,
767 /// The return type of get_update_fulfill_htlc_and_commit.
768 pub enum UpdateFulfillCommitFetch {
769 /// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
770 /// it in the holding cell, or re-generated the update_fulfill message after the same claim was
771 /// previously placed in the holding cell (and has since been removed).
773 /// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
774 monitor_update: ChannelMonitorUpdate,
775 /// The value of the HTLC which was claimed, in msat.
776 htlc_value_msat: u64,
778 /// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
779 /// or has been forgotten (presumably previously claimed).
783 /// The return value of `monitor_updating_restored`
784 pub(super) struct MonitorRestoreUpdates {
// revoke_and_ack to (re)send now that the monitor update completed, if any.
785 pub raa: Option<msgs::RevokeAndACK>,
// Commitment update to (re)send now that the monitor update completed, if any.
786 pub commitment_update: Option<msgs::CommitmentUpdate>,
// The order in which the RAA and commitment update above should be sent.
787 pub order: RAACommitmentOrder,
// HTLCs accepted while the monitor update was pending; presumably forwarded/processed by
// the caller — confirm against `ChannelManager` usage.
788 pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
// HTLCs to fail backwards now that the monitor update completed.
789 pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
// Claims which were fully resolved while the monitor update was pending.
790 pub finalized_claimed_htlcs: Vec<HTLCSource>,
// Funding transaction which can now be broadcast, if any.
791 pub funding_broadcastable: Option<Transaction>,
// channel_ready message to send now, if any.
792 pub channel_ready: Option<msgs::ChannelReady>,
// announcement_signatures message to send now, if any.
793 pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
796 /// The return value of `signer_maybe_unblocked`
// Messages which could not be generated while the signer was unavailable and can now be
// sent; each is `None` when nothing was pending on the signer for that message type.
798 pub(super) struct SignerResumeUpdates {
799 pub commitment_update: Option<msgs::CommitmentUpdate>,
800 pub funding_signed: Option<msgs::FundingSigned>,
801 pub channel_ready: Option<msgs::ChannelReady>,
804 /// The return value of `channel_reestablish`
// Messages to send to the counterparty in response to their channel_reestablish; each is
// `None` when that message does not need to be (re)sent.
805 pub(super) struct ReestablishResponses {
806 pub channel_ready: Option<msgs::ChannelReady>,
807 pub raa: Option<msgs::RevokeAndACK>,
808 pub commitment_update: Option<msgs::CommitmentUpdate>,
// The order in which the RAA and commitment update above should be sent.
809 pub order: RAACommitmentOrder,
810 pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
811 pub shutdown_msg: Option<msgs::Shutdown>,
814 /// The result of a shutdown that should be handled.
816 pub(crate) struct ShutdownResult {
// Why the channel was closed; surfaced to the user via the `ChannelClosed` event —
// TODO confirm at the event-emission site.
817 pub(crate) closure_reason: ClosureReason,
818 /// A channel monitor update to apply.
819 pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
820 /// A list of dropped outbound HTLCs that can safely be failed backwards immediately.
821 pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
822 /// An unbroadcasted batch funding transaction id. The closure of this channel should be
823 /// propagated to the remainder of the batch.
824 pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
// The (final) ID of the channel which was shut down.
825 pub(crate) channel_id: ChannelId,
// The user-provided identifier for this channel (see `get_user_id`).
826 pub(crate) user_channel_id: u128,
// Total capacity of the closed channel, in satoshis.
827 pub(crate) channel_capacity_satoshis: u64,
// The node ID of our counterparty on the closed channel.
828 pub(crate) counterparty_node_id: PublicKey,
// The funding transaction, if it was never broadcast.
829 pub(crate) unbroadcasted_funding_tx: Option<Transaction>,
832 /// If the majority of the channels funds are to the fundee and the initiator holds only just
833 /// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
834 /// initiator controls the feerate, if they then go to increase the channel fee, they may have no
835 /// balance but the fundee is unable to send a payment as the increase in fee more than drains
836 /// their reserve value. Thus, neither side can send a new HTLC and the channel becomes useless.
837 /// Thus, before sending an HTLC when we are the initiator, we check that the feerate can increase
838 /// by this multiple without hitting this case, before sending.
839 /// This multiple is effectively the maximum feerate "jump" we expect until more HTLCs flow over
840 /// the channel. Sadly, there isn't really a good number for this - if we expect to have no new
841 /// HTLCs for days we may need this to suffice for feerate increases across days, but that may
842 /// leave the channel less usable as we hold a bigger reserve.
// Public in test/fuzzing builds so tests can reference the buffer; private otherwise.
// The two cfg-gated declarations must keep the same value.
843 #[cfg(any(fuzzing, test))]
844 pub const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
845 #[cfg(not(any(fuzzing, test)))]
846 const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
848 /// If we fail to see a funding transaction confirmed on-chain within this many blocks after the
849 /// channel creation on an inbound channel, we simply force-close and move on.
850 /// This constant is the one suggested in BOLT 2.
851 pub(crate) const FUNDING_CONF_DEADLINE_BLOCKS: u32 = 2016;
853 /// In case of a concurrent update_add_htlc proposed by our counterparty, we might
854 /// not have enough balance value remaining to cover the onchain cost of this new
855 /// HTLC weight. If this happens, our counterparty fails the reception of our
856 /// commitment_signed including this new HTLC due to infringement on the channel reserve.
858 /// To prevent this case, we compute our outbound update_fee with an HTLC buffer of
859 /// size 2. However, if the number of concurrent update_add_htlc is higher, this still
860 /// leads to a channel force-close. Ultimately, this is an issue coming from the
861 /// design of LN state machines, allowing asynchronous updates.
862 pub(crate) const CONCURRENT_INBOUND_HTLC_FEE_BUFFER: u32 = 2;
864 /// When a channel is opened, we check that the funding amount is enough to pay for relevant
865 /// commitment transaction fees, with at least this many HTLCs present on the commitment
866 /// transaction (not counting the value of the HTLCs themselves).
867 pub(crate) const MIN_AFFORDABLE_HTLC_COUNT: usize = 4;
869 /// When a [`Channel`] has its [`ChannelConfig`] updated, its existing one is stashed for up to this
870 /// number of ticks to allow forwarding HTLCs by nodes that have yet to receive the new
871 /// ChannelUpdate prompted by the config update. This value was determined as follows:
873 /// * The expected interval between ticks (1 minute).
874 /// * The average convergence delay of updates across the network, i.e., ~300 seconds on average
875 /// for a node to see an update as seen on `<https://arxiv.org/pdf/2205.12737.pdf>`.
876 /// * `EXPIRE_PREV_CONFIG_TICKS` = convergence_delay / tick_interval
877 pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;
879 /// The number of ticks that may elapse while we're waiting for a response to a
880 /// [`msgs::RevokeAndACK`] or [`msgs::ChannelReestablish`] message before we attempt to disconnect
883 /// See [`ChannelContext::sent_message_awaiting_response`] for more information.
884 pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;
886 /// The number of ticks that may elapse while we're waiting for an unfunded outbound/inbound channel
887 /// to be promoted to a [`Channel`] since the unfunded channel was created. An unfunded channel
888 /// exceeding this age limit will be force-closed and purged from memory.
889 pub(crate) const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;
891 /// Number of blocks needed for an output from a coinbase transaction to be spendable.
892 pub(crate) const COINBASE_MATURITY: u32 = 100;
/// Wrapper for a [`ChannelMonitorUpdate`] which is held back from release until some external
/// action completes (see `ChannelContext::blocked_monitor_updates`).
894 struct PendingChannelMonitorUpdate {
895 update: ChannelMonitorUpdate,
// Serialization: the wrapped update is written/read as a single required TLV (type 0).
898 impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
899 (0, update, required),
902 /// The `ChannelPhase` enum describes the current phase in life of a lightning channel with each of
903 /// its variants containing an appropriate channel struct.
904 pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
// An outbound (locally-initiated) V1 channel that has not yet been funded.
905 UnfundedOutboundV1(OutboundV1Channel<SP>),
// An inbound (counterparty-initiated) V1 channel that has not yet been funded.
906 UnfundedInboundV1(InboundV1Channel<SP>),
910 impl<'a, SP: Deref> ChannelPhase<SP> where
911 SP::Target: SignerProvider,
912 <SP::Target as SignerProvider>::EcdsaSigner: ChannelSigner,
/// Returns a shared reference to the [`ChannelContext`] common to all phases.
914 pub fn context(&'a self) -> &'a ChannelContext<SP> {
916 ChannelPhase::Funded(chan) => &chan.context,
917 ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
918 ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
/// Returns a mutable reference to the [`ChannelContext`] common to all phases.
922 pub fn context_mut(&'a mut self) -> &'a mut ChannelContext<SP> {
924 ChannelPhase::Funded(ref mut chan) => &mut chan.context,
925 ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
926 ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
931 /// Contains all state common to unfunded inbound/outbound channels.
932 pub(super) struct UnfundedChannelContext {
933 /// A counter tracking how many ticks have elapsed since this unfunded channel was
934 /// created. If this unfunded channel reaches an age of
935 /// `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS`, it will be force-closed and purged from memory.
937 /// This is so that we don't keep channels around that haven't progressed to a funded state
938 /// in a timely manner.
939 unfunded_channel_age_ticks: usize,
942 impl UnfundedChannelContext {
943 /// Determines whether we should force-close and purge this unfunded channel from memory due to it
944 /// having reached the unfunded channel age limit.
946 /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
947 pub fn should_expire_unfunded_channel(&mut self) -> bool {
// Note: this call itself counts as one elapsed tick — the counter is incremented
// before the comparison, so callers must invoke it exactly once per timer tick.
948 self.unfunded_channel_age_ticks += 1;
949 self.unfunded_channel_age_ticks >= UNFUNDED_CHANNEL_AGE_LIMIT_TICKS
953 /// Contains everything about the channel including state, and various flags.
954 pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
955 config: LegacyChannelConfig,
957 // Track the previous `ChannelConfig` so that we can continue forwarding HTLCs that were
958 // constructed using it. The second element in the tuple corresponds to the number of ticks that
959 // have elapsed since the update occurred.
960 prev_config: Option<(ChannelConfig, usize)>,
// Overrides of our usual handshake limits for this channel, if any — presumably set when
// accepting an inbound channel; confirm at the construction sites.
962 inbound_handshake_limits_override: Option<ChannelHandshakeLimits>,
966 /// The current channel ID.
967 channel_id: ChannelId,
968 /// The temporary channel ID used during channel setup. Value kept even after transitioning to a final channel ID.
969 /// Will be `None` for channels created prior to 0.0.115.
970 temporary_channel_id: Option<ChannelId>,
// Current state of the channel's state machine.
971 channel_state: ChannelState,
973 // When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
974 // our peer. However, we want to make sure they received it, or else rebroadcast it when we
976 // We do so here, see `AnnouncementSigsSent` for more details on the state(s).
977 // Note that a number of our tests were written prior to the behavior here which retransmits
978 // AnnouncementSignatures until after an RAA completes, so the behavior is short-circuited in
980 #[cfg(any(test, feature = "_test_utils"))]
981 pub(crate) announcement_sigs_state: AnnouncementSigsState,
982 #[cfg(not(any(test, feature = "_test_utils")))]
983 announcement_sigs_state: AnnouncementSigsState,
// Secp256k1 context capable of both signing and verification (`All`).
985 secp_ctx: Secp256k1<secp256k1::All>,
986 channel_value_satoshis: u64,
988 latest_monitor_update_id: u64,
990 holder_signer: ChannelSignerType<SP>,
991 shutdown_scriptpubkey: Option<ShutdownScript>,
992 destination_script: ScriptBuf,
994 // Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction
995 // generation start at 0 and count up...this simplifies some parts of implementation at the
996 // cost of others, but should really just be changed.
998 cur_holder_commitment_transaction_number: u64,
999 cur_counterparty_commitment_transaction_number: u64,
1000 value_to_self_msat: u64, // Excluding all pending_htlcs, fees, and anchor outputs
1001 pending_inbound_htlcs: Vec<InboundHTLCOutput>,
1002 pending_outbound_htlcs: Vec<OutboundHTLCOutput>,
1003 holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,
1005 /// When resending CS/RAA messages on channel monitor restoration or on reconnect, we always
1006 /// need to ensure we resend them in the order we originally generated them. Note that because
1007 /// there can only ever be one in-flight CS and/or one in-flight RAA at any time, it is
1008 /// sufficient to simply set this to the opposite of any message we are generating as we
1009 /// generate it. ie when we generate a CS, we set this to RAAFirst as, if there is a pending
1010 /// in-flight RAA to resend, it will have been the first thing we generated, and thus we should
1012 resend_order: RAACommitmentOrder,
1014 monitor_pending_channel_ready: bool,
1015 monitor_pending_revoke_and_ack: bool,
1016 monitor_pending_commitment_signed: bool,
1018 // TODO: If a channel is drop'd, we don't know whether the `ChannelMonitor` is ultimately
1019 // responsible for some of the HTLCs here or not - we don't know whether the update in question
1020 // completed or not. We currently ignore these fields entirely when force-closing a channel,
1021 // but need to handle this somehow or we run the risk of losing HTLCs!
1022 monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
1023 monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
1024 monitor_pending_finalized_fulfills: Vec<HTLCSource>,
1026 /// If we went to send a commitment update (ie some messages then [`msgs::CommitmentSigned`])
1027 /// but our signer (initially) refused to give us a signature, we should retry at some point in
1028 /// the future when the signer indicates it may have a signature for us.
1030 /// This flag is set in such a case. Note that we don't need to persist this as we'll end up
1031 /// setting it again as a side-effect of [`Channel::channel_reestablish`].
1032 signer_pending_commitment_update: bool,
1033 /// Similar to [`Self::signer_pending_commitment_update`] but we're waiting to send either a
1034 /// [`msgs::FundingCreated`] or [`msgs::FundingSigned`] depending on if this channel is
1035 /// outbound or inbound.
1036 signer_pending_funding: bool,
1038 // pending_update_fee is filled when sending and receiving update_fee.
1040 // Because it follows the same commitment flow as HTLCs, `FeeUpdateState` is either `Outbound`
1041 // or matches a subset of the `InboundHTLCOutput` variants. It is then updated/used when
1042 // generating new commitment transactions with exactly the same criteria as inbound/outbound
1043 // HTLCs with similar state.
1044 pending_update_fee: Option<(u32, FeeUpdateState)>,
1045 // If a `send_update_fee()` call is made with ChannelState::AwaitingRemoteRevoke set, we place
1046 // it here instead of `pending_update_fee` in the same way as we place outbound HTLC updates in
1047 // `holding_cell_htlc_updates` instead of `pending_outbound_htlcs`. It is released into
1048 // `pending_update_fee` with the same criteria as outbound HTLC updates but can be updated by
1049 // further `send_update_fee` calls, dropping the previous holding cell update entirely.
1050 holding_cell_update_fee: Option<u32>,
1051 next_holder_htlc_id: u64,
1052 next_counterparty_htlc_id: u64,
// Commitment feerate, in sat per 1000 weight units (per the field name).
1053 feerate_per_kw: u32,
1055 /// The timestamp set on our latest `channel_update` message for this channel. It is updated
1056 /// when the channel is updated in ways which may impact the `channel_update` message or when a
1057 /// new block is received, ensuring it's always at least moderately close to the current real
1059 update_time_counter: u32,
1061 #[cfg(debug_assertions)]
1062 /// Max to_local and to_remote outputs in a locally-generated commitment transaction
1063 holder_max_commitment_tx_output: Mutex<(u64, u64)>,
1064 #[cfg(debug_assertions)]
1065 /// Max to_local and to_remote outputs in a remote-generated commitment transaction
1066 counterparty_max_commitment_tx_output: Mutex<(u64, u64)>,
1068 last_sent_closing_fee: Option<(u64, Signature)>, // (fee, holder_sig)
1069 target_closing_feerate_sats_per_kw: Option<u32>,
1071 /// If our counterparty sent us a closing_signed while we were waiting for a `ChannelMonitor`
1072 /// update, we need to delay processing it until later. We do that here by simply storing the
1073 /// closing_signed message and handling it in `maybe_propose_closing_signed`.
1074 pending_counterparty_closing_signed: Option<msgs::ClosingSigned>,
1076 /// The minimum and maximum absolute fee, in satoshis, we are willing to place on the closing
1077 /// transaction. These are set once we reach `closing_negotiation_ready`.
1079 pub(crate) closing_fee_limits: Option<(u64, u64)>,
1081 closing_fee_limits: Option<(u64, u64)>,
1083 /// If we remove an HTLC (or fee update), commit, and receive our counterparty's
1084 /// `revoke_and_ack`, we remove all knowledge of said HTLC (or fee update). However, the latest
1085 /// local commitment transaction that we can broadcast still contains the HTLC (or old fee)
1086 /// until we receive a further `commitment_signed`. Thus we are not eligible for initiating the
1087 /// `closing_signed` negotiation if we're expecting a counterparty `commitment_signed`.
1089 /// To ensure we don't send a `closing_signed` too early, we track this state here, waiting
1090 /// until we see a `commitment_signed` before doing so.
1092 /// We don't bother to persist this - we anticipate this state won't last longer than a few
1093 /// milliseconds, so any accidental force-closes here should be exceedingly rare.
1094 expecting_peer_commitment_signed: bool,
1096 /// The hash of the block in which the funding transaction was included.
1097 funding_tx_confirmed_in: Option<BlockHash>,
1098 funding_tx_confirmation_height: u32,
1099 short_channel_id: Option<u64>,
1100 /// Either the height at which this channel was created or the height at which it was last
1101 /// serialized if it was serialized by versions prior to 0.0.103.
1102 /// We use this to close if funding is never broadcasted.
1103 channel_creation_height: u32,
1105 counterparty_dust_limit_satoshis: u64,
1108 pub(super) holder_dust_limit_satoshis: u64,
1110 holder_dust_limit_satoshis: u64,
1113 pub(super) counterparty_max_htlc_value_in_flight_msat: u64,
1115 counterparty_max_htlc_value_in_flight_msat: u64,
1118 pub(super) holder_max_htlc_value_in_flight_msat: u64,
1120 holder_max_htlc_value_in_flight_msat: u64,
1122 /// minimum channel reserve for self to maintain - set by them.
1123 counterparty_selected_channel_reserve_satoshis: Option<u64>,
1126 pub(super) holder_selected_channel_reserve_satoshis: u64,
1128 holder_selected_channel_reserve_satoshis: u64,
1130 counterparty_htlc_minimum_msat: u64,
1131 holder_htlc_minimum_msat: u64,
1133 pub counterparty_max_accepted_htlcs: u16,
1135 counterparty_max_accepted_htlcs: u16,
1136 holder_max_accepted_htlcs: u16,
1137 minimum_depth: Option<u32>,
1139 counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,
1141 pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
1142 funding_transaction: Option<Transaction>,
1143 is_batch_funding: Option<()>,
1145 counterparty_cur_commitment_point: Option<PublicKey>,
1146 counterparty_prev_commitment_point: Option<PublicKey>,
1147 counterparty_node_id: PublicKey,
1149 counterparty_shutdown_scriptpubkey: Option<ScriptBuf>,
1151 commitment_secrets: CounterpartyCommitmentSecrets,
1153 channel_update_status: ChannelUpdateStatus,
1154 /// Once we reach `closing_negotiation_ready`, we set this, indicating if closing_signed does
1155 /// not complete within a single timer tick (one minute), we should force-close the channel.
1156 /// This prevents us from keeping unusable channels around forever if our counterparty wishes
1158 /// Note that this field is reset to false on deserialization to give us a chance to connect to
1159 /// our peer and start the closing_signed negotiation fresh.
1160 closing_signed_in_flight: bool,
1162 /// Our counterparty's channel_announcement signatures provided in announcement_signatures.
1163 /// This can be used to rebroadcast the channel_announcement message later.
1164 announcement_sigs: Option<(Signature, Signature)>,
1166 // We save these values so we can make sure `next_local_commit_tx_fee_msat` and
1167 // `next_remote_commit_tx_fee_msat` properly predict what the next commitment transaction fee will
1168 // be, by comparing the cached values to the fee of the transaction generated by
1169 // `build_commitment_transaction`.
1170 #[cfg(any(test, fuzzing))]
1171 next_local_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
1172 #[cfg(any(test, fuzzing))]
1173 next_remote_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
1175 /// lnd has a long-standing bug where, upon reconnection, if the channel is not yet confirmed
1176 /// they will not send a channel_reestablish until the channel locks in. Then, they will send a
1177 /// channel_ready *before* sending the channel_reestablish (which is clearly a violation of
1178 /// the BOLT specs). We copy c-lightning's workaround here and simply store the channel_ready
1179 /// message until we receive a channel_reestablish.
1181 /// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
1182 pub workaround_lnd_bug_4006: Option<msgs::ChannelReady>,
1184 /// An option set when we wish to track how many ticks have elapsed while waiting for a response
1185 /// from our counterparty after sending a message. If the peer has yet to respond after reaching
1186 /// `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`, a reconnection should be attempted to try to
1187 /// unblock the state machine.
1189 /// This behavior is mostly motivated by a lnd bug in which we don't receive a message we expect
1190 /// to in a timely manner, which may lead to channels becoming unusable and/or force-closed. An
1191 /// example of such can be found at <https://github.com/lightningnetwork/lnd/issues/7682>.
1193 /// This is currently only used when waiting for a [`msgs::ChannelReestablish`] or
1194 /// [`msgs::RevokeAndACK`] message from the counterparty.
1195 sent_message_awaiting_response: Option<usize>,
1197 #[cfg(any(test, fuzzing))]
1198 // When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
1199 // corresponding HTLC on the inbound path. If, then, the outbound path channel is
1200 // disconnected and reconnected (before we've exchange commitment_signed and revoke_and_ack
1201 // messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This
1202 // is fine, but as a sanity check in our failure to generate the second claim, we check here
1203 // that the original was a claim, and that we aren't now trying to fulfill a failed HTLC.
1204 historical_inbound_htlc_fulfills: HashSet<u64>,
1206 /// This channel's type, as negotiated during channel open
1207 channel_type: ChannelTypeFeatures,
1209 // Our counterparty can offer us SCID aliases which they will map to this channel when routing
1210 // outbound payments. These can be used in invoice route hints to avoid explicitly revealing
1211 // the channel's funding UTXO.
1213 // We also use this when sending our peer a channel_update that isn't to be broadcasted
1214 // publicly - allowing them to re-use their map of SCID -> channel for channel_update ->
1215 // associated channel mapping.
1217 // We only bother storing the most recent SCID alias at any time, though our counterparty has
1218 // to store all of them.
1219 latest_inbound_scid_alias: Option<u64>,
1221 // We always offer our counterparty a static SCID alias, which we recognize as for this channel
1222 // if we see it in HTLC forwarding instructions. We don't bother rotating the alias given we
1223 // don't currently support node id aliases and eventually privacy should be provided with
1224 // blinded paths instead of simple scid+node_id aliases.
1225 outbound_scid_alias: u64,
1227 // We track whether we already emitted a `ChannelPending` event.
1228 channel_pending_event_emitted: bool,
1230 // We track whether we already emitted a `ChannelReady` event.
1231 channel_ready_event_emitted: bool,
1233 /// The unique identifier used to re-derive the private key material for the channel through
1234 /// [`SignerProvider::derive_channel_signer`].
1235 channel_keys_id: [u8; 32],
1237 /// If we can't release a [`ChannelMonitorUpdate`] until some external action completes, we
1238 /// store it here and only release it to the `ChannelManager` once it asks for it.
1239 blocked_monitor_updates: Vec<PendingChannelMonitorUpdate>,
1242 impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
1243 /// Allowed in any state (including after shutdown)
1244 pub fn get_update_time_counter(&self) -> u32 {
1245 self.update_time_counter
1248 pub fn get_latest_monitor_update_id(&self) -> u64 {
1249 self.latest_monitor_update_id
1252 pub fn should_announce(&self) -> bool {
1253 self.config.announced_channel
1256 pub fn is_outbound(&self) -> bool {
1257 self.channel_transaction_parameters.is_outbound_from_holder
1260 /// Gets the fee we'd want to charge for adding an HTLC output to this Channel
1261 /// Allowed in any state (including after shutdown)
1262 pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 {
1263 self.config.options.forwarding_fee_base_msat
1266 /// Returns true if we've ever received a message from the remote end for this Channel
1267 pub fn have_received_message(&self) -> bool {
1268 self.channel_state > ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT)
1271 /// Returns true if this channel is fully established and not known to be closing.
1272 /// Allowed in any state (including after shutdown)
1273 pub fn is_usable(&self) -> bool {
1274 matches!(self.channel_state, ChannelState::ChannelReady(_)) &&
1275 !self.channel_state.is_local_shutdown_sent() &&
1276 !self.channel_state.is_remote_shutdown_sent() &&
1277 !self.monitor_pending_channel_ready
1280 /// shutdown state returns the state of the channel in its various stages of shutdown
1281 pub fn shutdown_state(&self) -> ChannelShutdownState {
1282 match self.channel_state {
1283 ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_) =>
1284 if self.channel_state.is_local_shutdown_sent() && !self.channel_state.is_remote_shutdown_sent() {
1285 ChannelShutdownState::ShutdownInitiated
1286 } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && !self.closing_negotiation_ready() {
1287 ChannelShutdownState::ResolvingHTLCs
1288 } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && self.closing_negotiation_ready() {
1289 ChannelShutdownState::NegotiatingClosingFee
1291 ChannelShutdownState::NotShuttingDown
1293 ChannelState::ShutdownComplete => ChannelShutdownState::ShutdownComplete,
1294 _ => ChannelShutdownState::NotShuttingDown,
1298 fn closing_negotiation_ready(&self) -> bool {
1299 let is_ready_to_close = match self.channel_state {
1300 ChannelState::AwaitingChannelReady(flags) =>
1301 flags & FundedStateFlags::ALL == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
1302 ChannelState::ChannelReady(flags) =>
1303 flags == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
1306 self.pending_inbound_htlcs.is_empty() &&
1307 self.pending_outbound_htlcs.is_empty() &&
1308 self.pending_update_fee.is_none() &&
1312 /// Returns true if this channel is currently available for use. This is a superset of
1313 /// is_usable() and considers things like the channel being temporarily disabled.
1314 /// Allowed in any state (including after shutdown)
1315 pub fn is_live(&self) -> bool {
1316 self.is_usable() && !self.channel_state.is_peer_disconnected()
1319 // Public utilities:
1321 pub fn channel_id(&self) -> ChannelId {
1325 // Return the `temporary_channel_id` used during channel establishment.
1327 // Will return `None` for channels created prior to LDK version 0.0.115.
1328 pub fn temporary_channel_id(&self) -> Option<ChannelId> {
1329 self.temporary_channel_id
1332 pub fn minimum_depth(&self) -> Option<u32> {
1336 /// Gets the "user_id" value passed into the construction of this channel. It has no special
1337 /// meaning and exists only to allow users to have a persistent identifier of a channel.
1338 pub fn get_user_id(&self) -> u128 {
1342 /// Gets the channel's type
1343 pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
1347 /// Gets the channel's `short_channel_id`.
1349 /// Will return `None` if the channel hasn't been confirmed yet.
1350 pub fn get_short_channel_id(&self) -> Option<u64> {
1351 self.short_channel_id
1354 /// Allowed in any state (including after shutdown)
1355 pub fn latest_inbound_scid_alias(&self) -> Option<u64> {
1356 self.latest_inbound_scid_alias
1359 /// Allowed in any state (including after shutdown)
1360 pub fn outbound_scid_alias(&self) -> u64 {
1361 self.outbound_scid_alias
1364 /// Returns the holder signer for this channel.
1366 pub fn get_signer(&self) -> &ChannelSignerType<SP> {
1367 return &self.holder_signer
1370 /// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
1371 /// indicating we were written by LDK prior to 0.0.106 which did not set outbound SCID aliases
1372 /// or prior to any channel actions during `Channel` initialization.
1373 pub fn set_outbound_scid_alias(&mut self, outbound_scid_alias: u64) {
1374 debug_assert_eq!(self.outbound_scid_alias, 0);
1375 self.outbound_scid_alias = outbound_scid_alias;
1378 /// Returns the funding_txo we either got from our peer, or were given by
1379 /// get_funding_created.
1380 pub fn get_funding_txo(&self) -> Option<OutPoint> {
1381 self.channel_transaction_parameters.funding_outpoint
1384 /// Returns the height in which our funding transaction was confirmed.
1385 pub fn get_funding_tx_confirmation_height(&self) -> Option<u32> {
1386 let conf_height = self.funding_tx_confirmation_height;
1387 if conf_height > 0 {
1394 /// Returns the block hash in which our funding transaction was confirmed.
1395 pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
1396 self.funding_tx_confirmed_in
1399 /// Returns the current number of confirmations on the funding transaction.
1400 pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
1401 if self.funding_tx_confirmation_height == 0 {
1402 // We either haven't seen any confirmation yet, or observed a reorg.
1406 height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1)
1409 fn get_holder_selected_contest_delay(&self) -> u16 {
1410 self.channel_transaction_parameters.holder_selected_contest_delay
1413 fn get_holder_pubkeys(&self) -> &ChannelPublicKeys {
1414 &self.channel_transaction_parameters.holder_pubkeys
1417 pub fn get_counterparty_selected_contest_delay(&self) -> Option<u16> {
1418 self.channel_transaction_parameters.counterparty_parameters
1419 .as_ref().map(|params| params.selected_contest_delay)
1422 fn get_counterparty_pubkeys(&self) -> &ChannelPublicKeys {
1423 &self.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys
1426 /// Allowed in any state (including after shutdown)
1427 pub fn get_counterparty_node_id(&self) -> PublicKey {
1428 self.counterparty_node_id
1431 /// Allowed in any state (including after shutdown)
1432 pub fn get_holder_htlc_minimum_msat(&self) -> u64 {
1433 self.holder_htlc_minimum_msat
1436 /// Allowed in any state (including after shutdown), but will return none before TheirInitSent
1437 pub fn get_holder_htlc_maximum_msat(&self) -> Option<u64> {
1438 self.get_htlc_maximum_msat(self.holder_max_htlc_value_in_flight_msat)
1441 /// Allowed in any state (including after shutdown)
1442 pub fn get_announced_htlc_max_msat(&self) -> u64 {
1444 // Upper bound by capacity. We make it a bit less than full capacity to prevent attempts
1445 // to use full capacity. This is an effort to reduce routing failures, because in many cases
1446 // channel might have been used to route very small values (either by honest users or as DoS).
1447 self.channel_value_satoshis * 1000 * 9 / 10,
1449 self.counterparty_max_htlc_value_in_flight_msat
1453 /// Allowed in any state (including after shutdown)
1454 pub fn get_counterparty_htlc_minimum_msat(&self) -> u64 {
1455 self.counterparty_htlc_minimum_msat
1458 /// Allowed in any state (including after shutdown), but will return none before TheirInitSent
1459 pub fn get_counterparty_htlc_maximum_msat(&self) -> Option<u64> {
1460 self.get_htlc_maximum_msat(self.counterparty_max_htlc_value_in_flight_msat)
1463 fn get_htlc_maximum_msat(&self, party_max_htlc_value_in_flight_msat: u64) -> Option<u64> {
1464 self.counterparty_selected_channel_reserve_satoshis.map(|counterparty_reserve| {
1465 let holder_reserve = self.holder_selected_channel_reserve_satoshis;
1467 (self.channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000,
1468 party_max_htlc_value_in_flight_msat
// Channel capacity in satoshis.
1473 pub fn get_value_satoshis(&self) -> u64 {
1474 self.channel_value_satoshis
// Forwarding fee rate (proportional, in millionths) from the current channel config.
1477 pub fn get_fee_proportional_millionths(&self) -> u32 {
1478 self.config.options.forwarding_fee_proportional_millionths
// CLTV delta for forwarding, floored at the protocol-safe minimum.
1481 pub fn get_cltv_expiry_delta(&self) -> u16 {
1482 cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
// Resolves the configured maximum dust-HTLC exposure into a concrete msat value:
// either a fixed limit, or a multiple of the current on-chain-sweep feerate.
1485 pub fn get_max_dust_htlc_exposure_msat<F: Deref>(&self,
1486 fee_estimator: &LowerBoundedFeeEstimator<F>) -> u64
1487 where F::Target: FeeEstimator
1489 match self.config.options.max_dust_htlc_exposure {
1490 MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => {
1491 let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(
1492 ConfirmationTarget::OnChainSweep) as u64;
// saturating_mul: a huge configured multiplier must not overflow into a tiny limit.
1493 feerate_per_kw.saturating_mul(multiplier)
1495 MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
1499 /// Returns the previous [`ChannelConfig`] applied to this channel, if any.
1500 pub fn prev_config(&self) -> Option<ChannelConfig> {
// prev_config stores (config, ticks-since-update); expose only the config half.
1501 self.prev_config.map(|prev_config| prev_config.0)
// Event-emission bookkeeping: each of the `ChannelPending`/`ChannelReady` events is
// emitted at most once, guarded by a boolean flag set via the `set_*` methods below.
1504 // Checks whether we should emit a `ChannelPending` event.
1505 pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
1506 self.is_funding_broadcast() && !self.channel_pending_event_emitted
1509 // Returns whether we already emitted a `ChannelPending` event.
1510 pub(crate) fn channel_pending_event_emitted(&self) -> bool {
1511 self.channel_pending_event_emitted
1514 // Remembers that we already emitted a `ChannelPending` event.
1515 pub(crate) fn set_channel_pending_event_emitted(&mut self) {
1516 self.channel_pending_event_emitted = true;
1519 // Checks whether we should emit a `ChannelReady` event.
1520 pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool {
1521 self.is_usable() && !self.channel_ready_event_emitted
1524 // Remembers that we already emitted a `ChannelReady` event.
1525 pub(crate) fn set_channel_ready_event_emitted(&mut self) {
1526 self.channel_ready_event_emitted = true;
1529 /// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once
1530 /// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will
1531 /// no longer be considered when forwarding HTLCs.
1532 pub fn maybe_expire_prev_config(&mut self) {
// Nothing to do if no previous config is being tracked.
1533 if self.prev_config.is_none() {
1536 let prev_config = self.prev_config.as_mut().unwrap();
// NOTE(review): the tick-increment statement (`prev_config.1 += 1` or similar)
// appears elided between these lines in this extraction — verify against upstream.
1538 if prev_config.1 == EXPIRE_PREV_CONFIG_TICKS {
1539 self.prev_config = None;
1543 /// Returns the current [`ChannelConfig`] applied to the channel.
1544 pub fn config(&self) -> ChannelConfig {
// NOTE(review): the body (`self.config.options`) appears elided in this extraction.
1548 /// Updates the channel's config. A bool is returned indicating whether the config update
1549 /// applied resulted in a new ChannelUpdate message.
1550 pub fn update_config(&mut self, config: &ChannelConfig) -> bool {
// Only the three relay-policy fields below affect our public channel_update message.
1551 let did_channel_update =
1552 self.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths ||
1553 self.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat ||
1554 self.config.options.cltv_expiry_delta != config.cltv_expiry_delta;
1555 if did_channel_update {
// Keep the old config around (with a tick counter of 0) so in-flight HTLCs
// forwarded under the old policy are still accepted until it expires.
1556 self.prev_config = Some((self.config.options, 0));
1557 // Update the counter, which backs the ChannelUpdate timestamp, to allow the relay
1558 // policy change to propagate throughout the network.
1559 self.update_time_counter += 1;
1561 self.config.options = *config;
1565 /// Returns true if funding_signed was sent/received and the
1566 /// funding transaction has been broadcast if necessary.
1567 pub fn is_funding_broadcast(&self) -> bool {
// Past the pre-funding states, and not still waiting on other channels in a batch
// before broadcasting the shared funding transaction.
1568 !self.channel_state.is_pre_funded_state() &&
1569 !matches!(self.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH))
// NOTE(review): this function is heavily elided in this extraction (original line
// numbers skip repeatedly — missing match arms, closing braces, struct fields and the
// CommitmentStats construction). Code kept byte-identical; comments only.
1572 /// Transaction nomenclature is somewhat confusing here as there are many different cases - a
1573 /// transaction is referred to as "a's transaction" implying that a will be able to broadcast
1574 /// the transaction. Thus, b will generally be sending a signature over such a transaction to
1575 /// a, and a can revoke the transaction by providing b the relevant per_commitment_secret. As
1576 /// such, a transaction is generally the result of b increasing the amount paid to a (or adding
1578 /// @local is used only to convert relevant internal structures which refer to remote vs local
1579 /// to decide value of outputs and direction of HTLCs.
1580 /// @generated_by_local is used to determine *which* HTLCs to include - noting that the HTLC
1581 /// state may indicate that one peer has informed the other that they'd like to add an HTLC but
1582 /// have not yet committed it. Such HTLCs will only be included in transactions which are being
1583 /// generated by the peer which proposed adding the HTLCs, and thus we need to understand both
1584 /// which peer generated this transaction and "to whom" this transaction flows.
1586 fn build_commitment_transaction<L: Deref>(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> CommitmentStats
1587 where L::Target: Logger
1589 let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new();
1590 let num_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len();
1591 let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs);
// Dust threshold is the *broadcaster's* dust limit — whoever can publish this tx.
1593 let broadcaster_dust_limit_satoshis = if local { self.holder_dust_limit_satoshis } else { self.counterparty_dust_limit_satoshis };
1594 let mut remote_htlc_total_msat = 0;
1595 let mut local_htlc_total_msat = 0;
1596 let mut value_to_self_msat_offset = 0;
// Apply a pending fee update only when its state matches who is generating this tx,
// mirroring the HTLC inclusion criteria below.
1598 let mut feerate_per_kw = self.feerate_per_kw;
1599 if let Some((feerate, update_state)) = self.pending_update_fee {
1600 if match update_state {
1601 // Note that these match the inclusion criteria when scanning
1602 // pending_inbound_htlcs below.
1603 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); !generated_by_local },
1604 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); !generated_by_local },
1605 FeeUpdateState::Outbound => { assert!(self.is_outbound()); generated_by_local },
1607 feerate_per_kw = feerate;
1611 log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...",
1612 commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number),
1613 get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()),
1615 if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
1617 macro_rules! get_htlc_in_commitment {
1618 ($htlc: expr, $offered: expr) => {
1619 HTLCOutputInCommitment {
1621 amount_msat: $htlc.amount_msat,
1622 cltv_expiry: $htlc.cltv_expiry,
1623 payment_hash: $htlc.payment_hash,
1624 transaction_output_index: None
// Classifies an HTLC as dust or non-dust: an HTLC is dust when its value minus the
// second-stage (timeout/success) tx fee would fall below the broadcaster's dust limit.
// With anchors, second-stage txs are zero-fee, so only the raw dust limit applies.
1629 macro_rules! add_htlc_output {
1630 ($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => {
1631 if $outbound == local { // "offered HTLC output"
1632 let htlc_in_tx = get_htlc_in_commitment!($htlc, true);
1633 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1636 feerate_per_kw as u64 * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
1638 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1639 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1640 included_non_dust_htlcs.push((htlc_in_tx, $source));
1642 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1643 included_dust_htlcs.push((htlc_in_tx, $source));
1646 let htlc_in_tx = get_htlc_in_commitment!($htlc, false);
1647 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1650 feerate_per_kw as u64 * htlc_success_tx_weight(self.get_channel_type()) / 1000
1652 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1653 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1654 included_non_dust_htlcs.push((htlc_in_tx, $source));
1656 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1657 included_dust_htlcs.push((htlc_in_tx, $source));
// Scan inbound HTLCs: include each based on its state vs. who generates this tx;
// collect preimages for locally-removed (fulfilled) HTLCs and adjust our balance offset.
1663 let mut inbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
1665 for ref htlc in self.pending_inbound_htlcs.iter() {
1666 let (include, state_name) = match htlc.state {
1667 InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"),
1668 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => (!generated_by_local, "AwaitingRemoteRevokeToAnnounce"),
1669 InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => (true, "AwaitingAnnouncedRemoteRevoke"),
1670 InboundHTLCState::Committed => (true, "Committed"),
1671 InboundHTLCState::LocalRemoved(_) => (!generated_by_local, "LocalRemoved"),
1675 add_htlc_output!(htlc, false, None, state_name);
1676 remote_htlc_total_msat += htlc.amount_msat;
1678 log_trace!(logger, " ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1680 &InboundHTLCState::LocalRemoved(ref reason) => {
1681 if generated_by_local {
1682 if let &InboundHTLCRemovalReason::Fulfill(preimage) = reason {
1683 inbound_htlc_preimages.push(preimage);
1684 value_to_self_msat_offset += htlc.amount_msat as i64;
// Scan outbound HTLCs symmetrically; collect preimages the remote has revealed.
1694 let mut outbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
1696 for ref htlc in self.pending_outbound_htlcs.iter() {
1697 let (include, state_name) = match htlc.state {
1698 OutboundHTLCState::LocalAnnounced(_) => (generated_by_local, "LocalAnnounced"),
1699 OutboundHTLCState::Committed => (true, "Committed"),
1700 OutboundHTLCState::RemoteRemoved(_) => (generated_by_local, "RemoteRemoved"),
1701 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) => (generated_by_local, "AwaitingRemoteRevokeToRemove"),
1702 OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) => (false, "AwaitingRemovedRemoteRevoke"),
1705 let preimage_opt = match htlc.state {
1706 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(p)) => p,
1707 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(p)) => p,
1708 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(p)) => p,
1712 if let Some(preimage) = preimage_opt {
1713 outbound_htlc_preimages.push(preimage);
1717 add_htlc_output!(htlc, true, Some(&htlc.source), state_name);
1718 local_htlc_total_msat += htlc.amount_msat;
1720 log_trace!(logger, " ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1722 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_))|OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => {
1723 value_to_self_msat_offset -= htlc.amount_msat as i64;
1725 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(_)) => {
1726 if !generated_by_local {
1727 value_to_self_msat_offset -= htlc.amount_msat as i64;
// Compute both sides' balances as i64 to tolerate the temporary reserve "violation"
// described below, then assert neither went negative.
1735 let value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
1736 assert!(value_to_self_msat >= 0);
1737 // Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie
1738 // AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
1739 // "violate" their reserve value by couting those against it. Thus, we have to convert
1740 // everything to i64 before subtracting as otherwise we can overflow.
1741 let value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
1742 assert!(value_to_remote_msat >= 0);
1744 #[cfg(debug_assertions)]
1746 // Make sure that the to_self/to_remote is always either past the appropriate
1747 // channel_reserve *or* it is making progress towards it.
1748 let mut broadcaster_max_commitment_tx_output = if generated_by_local {
1749 self.holder_max_commitment_tx_output.lock().unwrap()
1751 self.counterparty_max_commitment_tx_output.lock().unwrap()
1753 debug_assert!(broadcaster_max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= self.counterparty_selected_channel_reserve_satoshis.unwrap() as i64);
1754 broadcaster_max_commitment_tx_output.0 = cmp::max(broadcaster_max_commitment_tx_output.0, value_to_self_msat as u64);
1755 debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= self.holder_selected_channel_reserve_satoshis as i64);
1756 broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64);
// The channel funder pays the commitment fee (and anchor values, when applicable)
// out of their own output.
1759 let total_fee_sat = commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), &self.channel_transaction_parameters.channel_type_features);
1760 let anchors_val = if self.channel_transaction_parameters.channel_type_features.supports_anchors_zero_fee_htlc_tx() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64;
1761 let (value_to_self, value_to_remote) = if self.is_outbound() {
1762 (value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000)
1764 (value_to_self_msat / 1000, value_to_remote_msat / 1000 - anchors_val - total_fee_sat as i64)
// Map holder/counterparty onto the broadcaster ("a") / non-broadcaster ("b") roles.
1767 let mut value_to_a = if local { value_to_self } else { value_to_remote };
1768 let mut value_to_b = if local { value_to_remote } else { value_to_self };
1769 let (funding_pubkey_a, funding_pubkey_b) = if local {
1770 (self.get_holder_pubkeys().funding_pubkey, self.get_counterparty_pubkeys().funding_pubkey)
1772 (self.get_counterparty_pubkeys().funding_pubkey, self.get_holder_pubkeys().funding_pubkey)
// Outputs below the broadcaster's dust limit are omitted entirely.
1775 if value_to_a >= (broadcaster_dust_limit_satoshis as i64) {
1776 log_trace!(logger, " ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a);
1781 if value_to_b >= (broadcaster_dust_limit_satoshis as i64) {
1782 log_trace!(logger, " ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b);
1787 let num_nondust_htlcs = included_non_dust_htlcs.len();
1789 let channel_parameters =
1790 if local { self.channel_transaction_parameters.as_holder_broadcastable() }
1791 else { self.channel_transaction_parameters.as_counterparty_broadcastable() };
1792 let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number,
1799 &mut included_non_dust_htlcs,
// Return the HTLC list sorted by assigned output index, with dust HTLCs appended.
1802 let mut htlcs_included = included_non_dust_htlcs;
1803 // The unwrap is safe, because all non-dust HTLCs have been assigned an output index
1804 htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
1805 htlcs_included.append(&mut included_dust_htlcs);
1813 local_balance_msat: value_to_self_msat as u64,
1814 remote_balance_msat: value_to_remote_msat as u64,
1815 inbound_htlc_preimages,
1816 outbound_htlc_preimages,
1821 /// Creates a set of keys for build_commitment_transaction to generate a transaction which our
1822 /// counterparty will sign (ie DO NOT send signatures over a transaction created by this to
1823 /// our counterparty!)
1824 /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
1825 /// TODO Some magic rust shit to compile-time check this?
1826 fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
// Our per-commitment point comes from the signer; basepoints are ours except for the
// revocation/htlc basepoints taken from the counterparty (they can revoke/claim from us).
1827 let per_commitment_point = self.holder_signer.as_ref().get_per_commitment_point(commitment_number, &self.secp_ctx);
1828 let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
1829 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1830 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1832 TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint)
1836 /// Creates a set of keys for build_commitment_transaction to generate a transaction which we
1837 /// will sign and send to our counterparty.
1838 /// If an Err is returned, it is a ChannelError::Close (for get_funding_created)
1839 fn build_remote_transaction_keys(&self) -> TxCreationKeys {
// Mirror image of build_holder_transaction_keys: the counterparty's per-commitment
// point and basepoints are used, with our revocation/htlc basepoints swapped in.
// NOTE(review): unwrap on counterparty_cur_commitment_point — panics if called
// before the point is known; presumably callers guarantee this. Confirm upstream.
1840 let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
1841 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1842 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1844 TxCreationKeys::derive_new(&self.secp_ctx, &self.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint)
1847 /// Gets the redeemscript for the funding transaction output (ie the funding transaction output
1848 /// pays to get_funding_redeemscript().to_v0_p2wsh()).
1849 /// Panics if called before accept_channel/InboundV1Channel::new
1850 pub fn get_funding_redeemscript(&self) -> ScriptBuf {
// 2-of-2 multisig over both funding pubkeys.
1851 make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
// Counterparty's funding pubkey from the (always-populated post-accept) channel parameters.
1854 fn counterparty_funding_pubkey(&self) -> &PublicKey {
1855 &self.get_counterparty_pubkeys().funding_pubkey
// Current commitment feerate in sat per 1000 weight units.
// NOTE(review): the body appears elided in this extraction.
1858 pub fn get_feerate_sat_per_1000_weight(&self) -> u32 {
1862 pub fn get_dust_buffer_feerate(&self, outbound_feerate_update: Option<u32>) -> u32 {
1863 // When calculating our exposure to dust HTLCs, we assume that the channel feerate
1864 // may, at any point, increase by at least 10 sat/vB (i.e 2530 sat/kWU) or 25%,
1865 // whichever is higher. This ensures that we aren't suddenly exposed to significantly
1866 // more dust balance if the feerate increases when we have several HTLCs pending
1867 // which are near the dust limit.
1868 let mut feerate_per_kw = self.feerate_per_kw;
1869 // If there's a pending update fee, use it to ensure we aren't under-estimating
1870 // potential feerate updates coming soon.
1871 if let Some((feerate, _)) = self.pending_update_fee {
1872 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1874 if let Some(feerate) = outbound_feerate_update {
1875 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
// checked_mul guards against u32 overflow; on overflow we fall back to u32::MAX.
1877 let feerate_plus_quarter = feerate_per_kw.checked_mul(1250).map(|v| v / 1000);
1878 cmp::max(2530, feerate_plus_quarter.unwrap_or(u32::max_value()))
1881 /// Get forwarding information for the counterparty.
1882 pub fn counterparty_forwarding_info(&self) -> Option<CounterpartyForwardingInfo> {
// Cloned, as the stored info may be updated when the counterparty sends channel_update.
1883 self.counterparty_forwarding_info.clone()
1886 /// Returns a HTLCStats about inbound pending htlcs
1887 fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
// NOTE(review): `let context = &self;` appears elided here in this extraction —
// the body below refers to `context`.
1889 let mut stats = HTLCStats {
1890 pending_htlcs: context.pending_inbound_htlcs.len() as u32,
1891 pending_htlcs_value_msat: 0,
1892 on_counterparty_tx_dust_exposure_msat: 0,
1893 on_holder_tx_dust_exposure_msat: 0,
1894 holding_cell_msat: 0,
1895 on_holder_tx_holding_cell_htlcs_count: 0,
// Dust thresholds include the second-stage tx fee at the buffered feerate, except
// with anchors where second-stage txs are zero-fee (anchors branch appears elided).
1898 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1901 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1902 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1903 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
// Inbound HTLCs: timeout-claimable on the counterparty's tx, success-claimable on ours.
1905 let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
1906 let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
1907 for ref htlc in context.pending_inbound_htlcs.iter() {
1908 stats.pending_htlcs_value_msat += htlc.amount_msat;
1909 if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
1910 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1912 if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
1913 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1919 /// Returns a HTLCStats about pending outbound htlcs, *including* pending adds in our holding cell.
1920 fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
// NOTE(review): `let context = &self;` appears elided here in this extraction.
1922 let mut stats = HTLCStats {
1923 pending_htlcs: context.pending_outbound_htlcs.len() as u32,
1924 pending_htlcs_value_msat: 0,
1925 on_counterparty_tx_dust_exposure_msat: 0,
1926 on_holder_tx_dust_exposure_msat: 0,
1927 holding_cell_msat: 0,
1928 on_holder_tx_holding_cell_htlcs_count: 0,
// Same buffered-feerate dust thresholds as the inbound variant (anchors branch elided).
1931 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1934 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1935 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1936 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
// Outbound HTLCs: success-claimable on the counterparty's tx, timeout-claimable on ours
// — the mirror image of get_inbound_pending_htlc_stats.
1938 let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
1939 let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
1940 for ref htlc in context.pending_outbound_htlcs.iter() {
1941 stats.pending_htlcs_value_msat += htlc.amount_msat;
1942 if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
1943 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1945 if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
1946 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
// Also count not-yet-sent adds sitting in the holding cell.
1950 for update in context.holding_cell_htlc_updates.iter() {
1951 if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
1952 stats.pending_htlcs += 1;
1953 stats.pending_htlcs_value_msat += amount_msat;
1954 stats.holding_cell_msat += amount_msat;
1955 if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
1956 stats.on_counterparty_tx_dust_exposure_msat += amount_msat;
1958 if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
1959 stats.on_holder_tx_dust_exposure_msat += amount_msat;
1961 stats.on_holder_tx_holding_cell_htlcs_count += 1;
// NOTE(review): this function is heavily elided in this extraction (original line
// numbers skip — missing closing braces, the AvailableBalances struct literal opener,
// and several statements). Code kept byte-identical; comments only.
1968 /// Get the available balances, see [`AvailableBalances`]'s fields for more info.
1969 /// Doesn't bother handling the
1970 /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
1971 /// corner case properly.
1972 pub fn get_available_balances<F: Deref>(&self, fee_estimator: &LowerBoundedFeeEstimator<F>)
1973 -> AvailableBalances
1974 where F::Target: FeeEstimator
1976 let context = &self;
1977 // Note that we have to handle overflow due to the above case.
1978 let inbound_stats = context.get_inbound_pending_htlc_stats(None);
1979 let outbound_stats = context.get_outbound_pending_htlc_stats(None);
// Our spendable balance: value_to_self plus inbound HTLCs we've already fulfilled
// (preimage known, awaiting removal), minus all pending outbound HTLC value.
1981 let mut balance_msat = context.value_to_self_msat;
1982 for ref htlc in context.pending_inbound_htlcs.iter() {
1983 if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
1984 balance_msat += htlc.amount_msat;
1987 balance_msat -= outbound_stats.pending_htlcs_value_msat;
1989 let outbound_capacity_msat = context.value_to_self_msat
1990 .saturating_sub(outbound_stats.pending_htlcs_value_msat)
1992 context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);
1994 let mut available_capacity_msat = outbound_capacity_msat;
1996 let anchor_outputs_value_msat = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1997 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
2001 if context.is_outbound() {
2002 // We should mind channel commit tx fee when computing how much of the available capacity
2003 // can be used in the next htlc. Mirrors the logic in send_htlc.
2005 // The fee depends on whether the amount we will be sending is above dust or not,
2006 // and the answer will in turn change the amount itself — making it a circular
2008 // This complicates the computation around dust-values, up to the one-htlc-value.
2009 let mut real_dust_limit_timeout_sat = context.holder_dust_limit_satoshis;
2010 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2011 real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000;
// Compute the commit-tx fee for "one more non-dust HTLC" and "one more dust HTLC";
// the `Some(())` reserves an extra fee-spike-buffer HTLC slot.
2014 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
2015 let mut max_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
2016 let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered);
2017 let mut min_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_dust, Some(()));
2018 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2019 max_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
2020 min_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
2023 // We will first subtract the fee as if we were above-dust. Then, if the resulting
2024 // value ends up being below dust, we have this fee available again. In that case,
2025 // match the value to right-below-dust.
2026 let mut capacity_minus_commitment_fee_msat: i64 = available_capacity_msat as i64 -
2027 max_reserved_commit_tx_fee_msat as i64 - anchor_outputs_value_msat as i64;
2028 if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 {
2029 let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
2030 debug_assert!(one_htlc_difference_msat != 0);
2031 capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64;
2032 capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat);
2033 available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64;
2035 available_capacity_msat = capacity_minus_commitment_fee_msat as u64;
2038 // If the channel is inbound (i.e. counterparty pays the fee), we need to make sure
2039 // sending a new HTLC won't reduce their balance below our reserve threshold.
2040 let mut real_dust_limit_success_sat = context.counterparty_dust_limit_satoshis;
2041 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2042 real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000;
2045 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered);
2046 let max_reserved_commit_tx_fee_msat = context.next_remote_commit_tx_fee_msat(htlc_above_dust, None);
2048 let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000;
2049 let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat)
2050 .saturating_sub(inbound_stats.pending_htlcs_value_msat);
2052 if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat + anchor_outputs_value_msat {
2053 // If another HTLC's fee would reduce the remote's balance below the reserve limit
2054 // we've selected for them, we can only send dust HTLCs.
2055 available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1);
2059 let mut next_outbound_htlc_minimum_msat = context.counterparty_htlc_minimum_msat;
2061 // If we get close to our maximum dust exposure, we end up in a situation where we can send
2062 // between zero and the remaining dust exposure limit remaining OR above the dust limit.
2063 // Because we cannot express this as a simple min/max, we prefer to tell the user they can
2064 // send above the dust limit (as the router can always overpay to meet the dust limit).
2065 let mut remaining_msat_below_dust_exposure_limit = None;
2066 let mut dust_exposure_dust_limit_msat = 0;
2067 let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(fee_estimator);
2069 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2070 (context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
2072 let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64;
2073 (context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2074 context.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
// Check whether one more max-size dust HTLC would push either side's commitment tx
// past the configured dust exposure limit; if so, constrain what we may send.
2076 let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
2077 if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
2078 remaining_msat_below_dust_exposure_limit =
2079 Some(max_dust_htlc_exposure_msat.saturating_sub(on_counterparty_dust_htlc_exposure_msat));
2080 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
2083 let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
2084 if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
2085 remaining_msat_below_dust_exposure_limit = Some(cmp::min(
2086 remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
2087 max_dust_htlc_exposure_msat.saturating_sub(on_holder_dust_htlc_exposure_msat)));
2088 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
2091 if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit {
2092 if available_capacity_msat < dust_exposure_dust_limit_msat {
2093 available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat);
2095 next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat);
// Finally apply the counterparty's max-in-flight and max-accepted-HTLC-count limits.
2099 available_capacity_msat = cmp::min(available_capacity_msat,
2100 context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat);
2102 if outbound_stats.pending_htlcs + 1 > context.counterparty_max_accepted_htlcs as u32 {
2103 available_capacity_msat = 0;
// NOTE(review): the `AvailableBalances {` opener and `balance_msat` field appear
// elided ahead of this struct-literal tail.
2107 inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000
2108 - context.value_to_self_msat as i64
2109 - context.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
2110 - context.holder_selected_channel_reserve_satoshis as i64 * 1000,
2112 outbound_capacity_msat,
2113 next_outbound_htlc_limit_msat: available_capacity_msat,
2114 next_outbound_htlc_minimum_msat,
// Returns (our reserve, counterparty's reserve-if-known) in satoshis; the
// counterparty's side is None until they have told us their selection.
2119 pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
2120 let context = &self;
2121 (context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis)
2124 /// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the
2125 /// number of pending HTLCs that are on track to be in our next commitment tx.
2127 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
2128 /// `fee_spike_buffer_htlc` is `Some`.
2130 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
2131 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
2133 /// Dust HTLCs are excluded.
2134 fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
2135 let context = &self;
// Only the channel funder pays the commitment-tx fee, so this is only meaningful outbound.
2136 assert!(context.is_outbound());
// NOTE(review): the extraction is missing lines here — the anchors branch of this `if`
// (where second-stage HTLC txs carry no fee) is elided. Verify against upstream.
2138 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2141 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2142 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
// Effective dust thresholds: our dust limit plus the fee of the second-stage HTLC tx.
2144 let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
2145 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
2147 let mut addl_htlcs = 0;
2148 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
// Count the candidate HTLC only if it would be non-dust on our commitment tx
// (offered HTLCs compare against the timeout threshold, received against success).
// NOTE(review): the enclosing `match htlc.origin {` opener and arm closers are elided here.
2150 HTLCInitiator::LocalOffered => {
2151 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
2155 HTLCInitiator::RemoteOffered => {
2156 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
// Count existing non-dust HTLCs that will appear in our next commitment tx.
2162 let mut included_htlcs = 0;
2163 for ref htlc in context.pending_inbound_htlcs.iter() {
2164 if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
2167 // We include LocalRemoved HTLCs here because we may still need to broadcast a commitment
2168 // transaction including this HTLC if it times out before they RAA.
2169 included_htlcs += 1;
2172 for ref htlc in context.pending_outbound_htlcs.iter() {
2173 if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
2177 OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1,
2178 OutboundHTLCState::Committed => included_htlcs += 1,
2179 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
2180 // We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment
2181 // transaction won't be generated until they send us their next RAA, which will mean
2182 // dropping any HTLCs in this state.
// Holding-cell adds will also land in our next commitment tx if non-dust.
2187 for htlc in context.holding_cell_htlc_updates.iter() {
2189 &HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => {
2190 if amount_msat / 1000 < real_dust_limit_timeout_sat {
2195 _ => {}, // Don't include claims/fails that are awaiting ack, because once we get the
2196 // ack we're guaranteed to never include them in commitment txs anymore.
2200 let num_htlcs = included_htlcs + addl_htlcs;
2201 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
// Test/fuzz-only: cache the inputs of this computation so later checks can verify
// the fee we predicted matches the commitment tx we actually build.
2202 #[cfg(any(test, fuzzing))]
2205 if fee_spike_buffer_htlc.is_some() {
2206 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
2208 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len()
2209 + context.holding_cell_htlc_updates.len();
2210 let commitment_tx_info = CommitmentTxInfoCached {
2212 total_pending_htlcs,
2213 next_holder_htlc_id: match htlc.origin {
2214 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
2215 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
2217 next_counterparty_htlc_id: match htlc.origin {
2218 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
2219 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
2221 feerate: context.feerate_per_kw,
2223 *context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
2228 /// Get the commitment tx fee for the remote's next commitment transaction based on the number of
2229 /// pending HTLCs that are on track to be in their next commitment tx
2231 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
2232 /// `fee_spike_buffer_htlc` is `Some`.
2234 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
2235 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
2237 /// Dust HTLCs are excluded.
2238 fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
2239 let context = &self;
// The counterparty only pays the commitment fee when they funded, i.e. we are inbound.
2240 assert!(!context.is_outbound());
// NOTE(review): the anchors branch of this `if` is elided in this extraction.
2242 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2245 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2246 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
// Dust thresholds use the *counterparty's* dust limit since this is their commitment tx.
2248 let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
2249 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
2251 let mut addl_htlcs = 0;
2252 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
// Directions flip on their commitment tx: what we offer is "success" from their side.
// NOTE(review): the `match htlc.origin {` opener and arm closers are elided here.
2254 HTLCInitiator::LocalOffered => {
2255 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
2259 HTLCInitiator::RemoteOffered => {
2260 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
2266 // When calculating the set of HTLCs which will be included in their next commitment_signed, all
2267 // non-dust inbound HTLCs are included (as all states imply it will be included) and only
2268 // committed outbound HTLCs, see below.
2269 let mut included_htlcs = 0;
2270 for ref htlc in context.pending_inbound_htlcs.iter() {
2271 if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat {
2274 included_htlcs += 1;
2277 for ref htlc in context.pending_outbound_htlcs.iter() {
2278 if htlc.amount_msat / 1000 <= real_dust_limit_success_sat {
2281 // We only include outbound HTLCs if it will not be included in their next commitment_signed,
2282 // i.e. if they've responded to us with an RAA after announcement.
2284 OutboundHTLCState::Committed => included_htlcs += 1,
2285 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
2286 OutboundHTLCState::LocalAnnounced { .. } => included_htlcs += 1,
2291 let num_htlcs = included_htlcs + addl_htlcs;
2292 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
// Test/fuzz-only: cache the computation inputs for cross-checking against the real tx.
2293 #[cfg(any(test, fuzzing))]
2296 if fee_spike_buffer_htlc.is_some() {
2297 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
2299 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len();
2300 let commitment_tx_info = CommitmentTxInfoCached {
2302 total_pending_htlcs,
2303 next_holder_htlc_id: match htlc.origin {
2304 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
2305 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
2307 next_counterparty_htlc_id: match htlc.origin {
2308 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
2309 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
2311 feerate: context.feerate_per_kw,
2313 *context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
// Runs `f` only while the funding transaction could not yet have been broadcast:
// during funding negotiation, or while awaiting channel_ready if the channel is part of
// an unbroadcast batch or a monitor update is still in flight. Otherwise yields `None`.
// NOTE(review): the tail of this match (remaining states => None, and closers) is elided
// in this extraction — verify against upstream.
2318 fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O> where F: Fn() -> Option<O> {
2319 match self.channel_state {
2320 ChannelState::FundingNegotiated => f(),
2321 ChannelState::AwaitingChannelReady(flags) =>
2322 if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) ||
2323 flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into())
2333 /// Returns the transaction if there is a pending funding transaction that is yet to be
2335 pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
2336 self.if_unbroadcasted_funding(|| self.funding_transaction.clone())
2339 /// Returns the transaction ID if there is a pending funding transaction that is yet to be
2341 pub fn unbroadcasted_funding_txid(&self) -> Option<Txid> {
2342 self.if_unbroadcasted_funding(||
2343 self.channel_transaction_parameters.funding_outpoint.map(|txo| txo.txid)
2347 /// Returns whether the channel is funded in a batch.
2348 pub fn is_batch_funding(&self) -> bool {
2349 self.is_batch_funding.is_some()
2352 /// Returns the transaction ID if there is a pending batch funding transaction that is yet to be
2354 pub fn unbroadcasted_batch_funding_txid(&self) -> Option<Txid> {
2355 self.unbroadcasted_funding_txid().filter(|_| self.is_batch_funding())
2358 /// Gets the latest commitment transaction and any dependent transactions for relay (forcing
2359 /// shutdown of this channel - no more calls into this Channel may be made afterwards except
2360 /// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
2361 /// Also returns the list of payment_hashes for channels which we can safely fail backwards
2362 /// immediately (others we will have to allow to time out).
2363 pub fn force_shutdown(&mut self, should_broadcast: bool, closure_reason: ClosureReason) -> ShutdownResult {
2364 // Note that we MUST only generate a monitor update that indicates force-closure - we're
2365 // called during initialization prior to the chain_monitor in the encompassing ChannelManager
2366 // being fully configured in some cases. Thus, its likely any monitor events we generate will
2367 // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
// Force-closing twice is a caller bug.
2368 assert!(!matches!(self.channel_state, ChannelState::ShutdownComplete));
2370 // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
2371 // return them to fail the payment.
2372 let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
2373 let counterparty_node_id = self.get_counterparty_node_id();
2374 for htlc_update in self.holding_cell_htlc_updates.drain(..) {
// NOTE(review): the `match htlc_update {` opener and the non-Add arms are elided here.
2376 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
2377 dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
2382 let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
2383 // If we haven't yet exchanged funding signatures (ie channel_state < AwaitingChannelReady),
2384 // returning a channel monitor update here would imply a channel monitor update before
2385 // we even registered the channel monitor to begin with, which is invalid.
2386 // Thus, if we aren't actually at a point where we could conceivably broadcast the
2387 // funding transaction, don't return a funding txo (which prevents providing the
2388 // monitor update to the user, even if we return one).
2389 // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
2390 let generate_monitor_update = match self.channel_state {
2391 ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)|ChannelState::ShutdownComplete => true,
2394 if generate_monitor_update {
// CLOSED_CHANNEL_UPDATE_ID marks this as the terminal monitor update for the channel.
2395 self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
2396 Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
2397 update_id: self.latest_monitor_update_id,
2398 counterparty_node_id: Some(self.counterparty_node_id),
2399 updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
// Snapshot unbroadcast-funding info before flipping state (the getters below read
// channel_state, which is about to become ShutdownComplete).
2403 let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();
2404 let unbroadcasted_funding_tx = self.unbroadcasted_funding();
2406 self.channel_state = ChannelState::ShutdownComplete;
2407 self.update_time_counter += 1;
// NOTE(review): the `ShutdownResult {` opener and several fields (monitor_update,
// closure_reason, ...) are elided from this extraction.
2411 dropped_outbound_htlcs,
2412 unbroadcasted_batch_funding_txid,
2413 channel_id: self.channel_id,
2414 user_channel_id: self.user_id,
2415 channel_capacity_satoshis: self.channel_value_satoshis,
2416 counterparty_node_id: self.counterparty_node_id,
2417 unbroadcasted_funding_tx,
2421 /// Only allowed after [`Self::channel_transaction_parameters`] is set.
// Builds and signs the counterparty's initial commitment transaction, producing the
// `funding_signed` message (or `None` if an async signer has no signature yet).
2422 fn get_funding_signed_msg<L: Deref>(&mut self, logger: &L) -> (CommitmentTransaction, Option<msgs::FundingSigned>) where L::Target: Logger {
2423 let counterparty_keys = self.build_remote_transaction_keys();
2424 let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number + 1, &counterparty_keys, false, false, logger).tx;
2426 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
2427 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
2428 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
2429 &self.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
2431 match &self.holder_signer {
2432 // TODO (arik): move match into calling method for Taproot
2433 ChannelSignerType::Ecdsa(ecdsa) => {
// `.ok()` path: an Err from the signer yields funding_signed == None (async signing).
// NOTE(review): the `signature` field line of this struct literal is elided here.
2434 let funding_signed = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.secp_ctx)
2435 .map(|(signature, _)| msgs::FundingSigned {
2436 channel_id: self.channel_id(),
2439 partial_signature_with_nonce: None,
2443 if funding_signed.is_none() {
2444 #[cfg(not(async_signing))] {
2445 panic!("Failed to get signature for funding_signed");
2447 #[cfg(async_signing)] {
2448 log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
2449 self.signer_pending_funding = true;
2451 } else if self.signer_pending_funding {
2452 log_trace!(logger, "Counterparty commitment signature available for funding_signed message; clearing signer_pending_funding");
2453 self.signer_pending_funding = false;
2456 // We sign "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
2457 (counterparty_initial_commitment_tx, funding_signed)
2459 // TODO (taproot|arik)
2466 // Internal utility functions for channels
2468 /// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
2469 /// `channel_value_satoshis` in msat, set through
2470 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
2472 /// The effective percentage is lower bounded by 1% and upper bounded by 100%.
2474 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
2475 fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
// NOTE(review): the clamp-value lines of these branches are elided in this extraction;
// per the doc comment above, the percentage is clamped into [1, 100].
2476 let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
2478 } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
2481 config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
// sats * 1000 (to msat) * percent / 100 simplifies to sats * 10 * percent.
2483 channel_value_satoshis * 10 * configured_percent
2486 /// Returns a minimum channel reserve value the remote needs to maintain,
2487 /// required by us according to the configured or default
2488 /// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
2490 /// Guaranteed to return a value no larger than channel_value_satoshis
2492 /// This is used both for outbound and inbound channels and has lower bound
2493 /// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
2494 pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
2495 let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
2496 cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
/// This is for legacy reasons, present for forward-compatibility.
/// LDK versions older than 0.0.104 don't know how read/handle values other than default
/// from storage. Hence, we use this function to not persist default values of
/// `holder_selected_channel_reserve_satoshis` for channels into storage.
pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
	// 1% of the channel value; u64 division by a non-zero constant cannot overflow.
	let one_percent = channel_value_satoshis / 100;
	// At least 1000 sats, but never more than the whole channel value.
	cmp::min(channel_value_satoshis, cmp::max(one_percent, 1000))
}
2508 // Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
2509 // Note that num_htlcs should not include dust HTLCs.
2511 fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2512 feerate_per_kw as u64 * (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
2515 // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs.
2516 // Note that num_htlcs should not include dust HTLCs.
2517 pub(crate) fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2518 // Note that we need to divide before multiplying to round properly,
2519 // since the lowest denomination of bitcoin on-chain is the satoshi.
2520 (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
2523 // Holder designates channel data owned for the benefit of the user client.
2524 // Counterparty designates channel data owned by the another channel participant entity.
2525 pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
2526 pub context: ChannelContext<SP>,
// Test/fuzz-only cache of the inputs used by the `next_*_commit_tx_fee_msat` predictions,
// so the actual commitment-tx build can assert the prediction was consistent.
// NOTE(review): this extraction elides some field lines of this struct (the predicted fee
// and feerate fields, per their initializers elsewhere in this file) — verify upstream.
2529 #[cfg(any(test, fuzzing))]
2530 struct CommitmentTxInfoCached {
// Pending inbound + outbound (+ holding cell, for the local variant) HTLC count.
2532 total_pending_htlcs: usize,
2533 next_holder_htlc_id: u64,
2534 next_counterparty_htlc_id: u64,
2538 /// Contents of a wire message that fails an HTLC backwards. Useful for [`Channel::fail_htlc`] to
2539 /// fail with either [`msgs::UpdateFailMalformedHTLC`] or [`msgs::UpdateFailHTLC`] as needed.
2540 trait FailHTLCContents {
2541 type Message: FailHTLCMessageName;
2542 fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message;
2543 fn to_inbound_htlc_state(self) -> InboundHTLCState;
2544 fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK;
2546 impl FailHTLCContents for msgs::OnionErrorPacket {
2547 type Message = msgs::UpdateFailHTLC;
2548 fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
2549 msgs::UpdateFailHTLC { htlc_id, channel_id, reason: self }
2551 fn to_inbound_htlc_state(self) -> InboundHTLCState {
2552 InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(self))
2554 fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
2555 HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet: self }
// A `(sha256_of_onion, failure_code)` pair fails back via `update_fail_malformed_htlc`.
2558 impl FailHTLCContents for ([u8; 32], u16) {
2559 type Message = msgs::UpdateFailMalformedHTLC;
2560 fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
// NOTE(review): the `channel_id`/`htlc_id` field lines of this struct literal are elided
// in this extraction.
2561 msgs::UpdateFailMalformedHTLC {
2564 sha256_of_onion: self.0,
2565 failure_code: self.1
2568 fn to_inbound_htlc_state(self) -> InboundHTLCState {
2569 InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed(self))
2571 fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
// NOTE(review): the `htlc_id` field line of this struct literal is elided here.
2572 HTLCUpdateAwaitingACK::FailMalformedHTLC {
2574 sha256_of_onion: self.0,
2575 failure_code: self.1
/// Provides the wire-protocol name of an HTLC-failure message type, for logging.
trait FailHTLCMessageName {
	fn name() -> &'static str;
}
// NOTE(review): the returned string literal line is elided in this extraction
// (by symmetry with the malformed impl below it should be the message's wire name).
2583 impl FailHTLCMessageName for msgs::UpdateFailHTLC {
2584 fn name() -> &'static str {
2588 impl FailHTLCMessageName for msgs::UpdateFailMalformedHTLC {
2589 fn name() -> &'static str {
2590 "update_fail_malformed_htlc"
2594 impl<SP: Deref> Channel<SP> where
2595 SP::Target: SignerProvider,
2596 <SP::Target as SignerProvider>::EcdsaSigner: WriteableEcdsaChannelSigner
// Validates a feerate proposed by the counterparty against our estimator's minimum.
// Accepts a too-low feerate only if it is still higher than the channel's current one
// (refusing would make closing harder). Returns `ChannelError::Close` otherwise.
2598 fn check_remote_fee<F: Deref, L: Deref>(
2599 channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator<F>,
2600 feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L
2601 ) -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
// Anchor channels can tolerate a lower remote feerate (fees can be bumped at close).
2603 let lower_limit_conf_target = if channel_type.supports_anchors_zero_fee_htlc_tx() {
2604 ConfirmationTarget::MinAllowedAnchorChannelRemoteFee
2606 ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee
2608 let lower_limit = fee_estimator.bounded_sat_per_1000_weight(lower_limit_conf_target);
2609 if feerate_per_kw < lower_limit {
2610 if let Some(cur_feerate) = cur_feerate_per_kw {
2611 if feerate_per_kw > cur_feerate {
// NOTE(review): the log macro opener and the `return Ok(())` for this accept path are
// elided in this extraction.
2613 "Accepting feerate that may prevent us from closing this channel because it's higher than what we have now. Had {} s/kW, now {} s/kW.",
2614 cur_feerate, feerate_per_kw);
2618 return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit)));
2624 fn get_closing_scriptpubkey(&self) -> ScriptBuf {
2625 // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
2626 // is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
2627 // outside of those situations will fail.
2628 self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
// Estimates the weight of the cooperative closing transaction, given which of the two
// outputs (ours / theirs) will be present. Assumes 71-byte signatures (including the
// sighash flag) for the 2-of-2 funding input witness.
2632 fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
// NOTE(review): the opening of this weight expression (version/locktime/input-count
// and outpoint bytes) is elided in this extraction — only the tail survives.
2637 1 + // script length (0)
2641 )*4 + // * 4 for non-witness parts
2642 2 + // witness marker and flag
2643 1 + // witness element count
2644 4 + // 4 element lengths (2 sigs, multisig dummy, and witness script)
2645 self.context.get_funding_redeemscript().len() as u64 + // funding witness script
2646 2*(1 + 71); // two signatures + sighash type flags
2647 if let Some(spk) = a_scriptpubkey {
2648 ret += ((8+1) + // output values and script length
2649 spk.len() as u64) * 4; // scriptpubkey and witness multiplier
2651 if let Some(spk) = b_scriptpubkey {
2652 ret += ((8+1) + // output values and script length
2653 spk.len() as u64) * 4; // scriptpubkey and witness multiplier
// Builds the cooperative closing transaction for the proposed fee, returning it along
// with the fee actually used (which may exceed the proposal if the funder's balance
// could not cover it). `skip_remote_output` drops the counterparty output entirely.
2659 fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
// Closing negotiation may only begin once no HTLCs or fee updates are in flight.
2660 assert!(self.context.pending_inbound_htlcs.is_empty());
2661 assert!(self.context.pending_outbound_htlcs.is_empty());
2662 assert!(self.context.pending_update_fee.is_none());
2664 let mut total_fee_satoshis = proposed_total_fee_satoshis;
// The funder (outbound side) pays the closing fee out of their balance.
2665 let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 };
2666 let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 };
// If the fee pushed the funder's balance negative, cap the fee at their balance.
2668 if value_to_holder < 0 {
2669 assert!(self.context.is_outbound());
2670 total_fee_satoshis += (-value_to_holder) as u64;
2671 } else if value_to_counterparty < 0 {
2672 assert!(!self.context.is_outbound());
2673 total_fee_satoshis += (-value_to_counterparty) as u64;
// Trim dust outputs (and optionally the remote output entirely).
2676 if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis {
2677 value_to_counterparty = 0;
2680 if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis {
2681 value_to_holder = 0;
// Both shutdown scripts must be known by the time closing is negotiated.
2684 assert!(self.context.shutdown_scriptpubkey.is_some());
2685 let holder_shutdown_script = self.get_closing_scriptpubkey();
2686 let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap();
2687 let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();
2689 let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
2690 (closing_transaction, total_fee_satoshis)
2693 fn funding_outpoint(&self) -> OutPoint {
2694 self.context.channel_transaction_parameters.funding_outpoint.unwrap()
2697 /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
2700 /// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
2701 /// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
2703 /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is
2705 pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
2706 (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
2707 where L::Target: Logger {
2708 // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
2709 // (see equivalent if condition there).
2710 assert!(self.context.channel_state.should_force_holding_cell());
2711 let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
2712 let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
2713 self.context.latest_monitor_update_id = mon_update_id;
2714 if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
2715 assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
// Records a claim of an inbound HTLC with the given preimage, producing the monitor
// update carrying the preimage plus (unless forced into the holding cell) the
// `update_fulfill_htlc` message. Duplicate claims return `DuplicateClaim`.
2719 fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
2720 // Either ChannelReady got set (which means it won't be unset) or there is no way any
2721 // caller thought we could have something claimed (cause we wouldn't have accepted in an
2722 // incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
2724 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
2725 panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
2728 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
2729 // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
2730 // these, but for now we just have to treat them as normal.
// usize::MAX is the "not found" sentinel for the index search below.
2732 let mut pending_idx = core::usize::MAX;
2733 let mut htlc_value_msat = 0;
2734 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
2735 if htlc.htlc_id == htlc_id_arg {
2736 debug_assert_eq!(htlc.payment_hash, PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).to_byte_array()));
2737 log_debug!(logger, "Claiming inbound HTLC id {} with payment hash {} with preimage {}",
2738 htlc.htlc_id, htlc.payment_hash, payment_preimage_arg);
// NOTE(review): the `match htlc.state {` opener and several closers are elided here.
2740 InboundHTLCState::Committed => {},
2741 InboundHTLCState::LocalRemoved(ref reason) => {
2742 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
2744 log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, &self.context.channel_id());
2745 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
2747 return UpdateFulfillFetch::DuplicateClaim {};
2750 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2751 // Don't return in release mode here so that we can update channel_monitor
2755 htlc_value_msat = htlc.amount_msat;
2759 if pending_idx == core::usize::MAX {
2760 #[cfg(any(test, fuzzing))]
2761 // If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
2762 // this is simply a duplicate claim, not previously failed and we lost funds.
2763 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2764 return UpdateFulfillFetch::DuplicateClaim {};
2767 // Now update local state:
2769 // We have to put the payment_preimage in the channel_monitor right away here to ensure we
2770 // can claim it even if the channel hits the chain before we see their next commitment.
2771 self.context.latest_monitor_update_id += 1;
2772 let monitor_update = ChannelMonitorUpdate {
2773 update_id: self.context.latest_monitor_update_id,
2774 counterparty_node_id: Some(self.context.counterparty_node_id),
2775 updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
2776 payment_preimage: payment_preimage_arg.clone(),
// Peer disconnected or monitor update in flight: defer the fulfill to the holding cell.
2780 if self.context.channel_state.should_force_holding_cell() {
2781 // Note that this condition is the same as the assertion in
2782 // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
2783 // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
2784 // do not not get into this branch.
2785 for pending_update in self.context.holding_cell_htlc_updates.iter() {
2786 match pending_update {
2787 &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2788 if htlc_id_arg == htlc_id {
2789 // Make sure we don't leave latest_monitor_update_id incremented here:
2790 self.context.latest_monitor_update_id -= 1;
2791 #[cfg(any(test, fuzzing))]
2792 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2793 return UpdateFulfillFetch::DuplicateClaim {};
2796 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
2797 &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
2799 if htlc_id_arg == htlc_id {
2800 log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id());
2801 // TODO: We may actually be able to switch to a fulfill here, though its
2802 // rare enough it may not be worth the complexity burden.
2803 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
2804 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2810 log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state.to_u32());
2811 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
2812 payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
2814 #[cfg(any(test, fuzzing))]
2815 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
2816 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2818 #[cfg(any(test, fuzzing))]
2819 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
// Immediate path: mark the HTLC LocalRemoved(Fulfill) and emit the wire message.
2822 let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
2823 if let InboundHTLCState::Committed = htlc.state {
2825 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2826 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2828 log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, &self.context.channel_id);
2829 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
2832 UpdateFulfillFetch::NewClaim {
// NOTE(review): the `monitor_update`/`htlc_value_msat` field lines are elided here.
2835 msg: Some(msgs::UpdateFulfillHTLC {
2836 channel_id: self.context.channel_id(),
2837 htlc_id: htlc_id_arg,
2838 payment_preimage: payment_preimage_arg,
// Fulfills an HTLC and, when monitor updates are unblocked, folds a fresh commitment
// update into the same ChannelMonitorUpdate; otherwise queues it behind blocked updates.
2843 pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
2844 let release_cs_monitor = self.context.blocked_monitor_updates.is_empty();
2845 match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
2846 UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
2847 // Even if we aren't supposed to let new monitor updates with commitment state
2848 // updates run, we still need to push the preimage ChannelMonitorUpdateStep no
2849 // matter what. Sadly, to push a new monitor update which flies before others
2850 // already queued, we have to insert it into the pending queue and update the
2851 // update_ids of all the following monitors.
2852 if release_cs_monitor && msg.is_some() {
2853 let mut additional_update = self.build_commitment_no_status_check(logger);
2854 // build_commitment_no_status_check may bump latest_monitor_id but we want them
2855 // to be strictly increasing by one, so decrement it here.
2856 self.context.latest_monitor_update_id = monitor_update.update_id;
2857 monitor_update.updates.append(&mut additional_update.updates);
// NOTE(review): the `} else {` separating this blocked-updates branch is elided here.
2859 let new_mon_id = self.context.blocked_monitor_updates.get(0)
2860 .map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
2861 monitor_update.update_id = new_mon_id;
2862 for held_update in self.context.blocked_monitor_updates.iter_mut() {
2863 held_update.update.update_id += 1;
2866 debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
2867 let update = self.build_commitment_no_status_check(logger);
2868 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
// NOTE(review): the `update,` field and surrounding closers are elided here.
2874 self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
2875 UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, }
2877 UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
2881 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2882 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2883 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2884 /// before we fail backwards.
2886 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2887 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2888 /// [`ChannelError::Ignore`].
2889 pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
2890 -> Result<(), ChannelError> where L::Target: Logger {
// We pass `force_holding_cell = true`, so `fail_htlc` queues the failure rather than
// producing an immediate message — hence the returned message must always be `None`.
2891 self.fail_htlc(htlc_id_arg, err_packet, true, logger)
2892 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
2895 /// Used for failing back with [`msgs::UpdateFailMalformedHTLC`]. For now, this is used when we
2896 /// want to fail blinded HTLCs where we are not the intro node.
2898 /// See [`Self::queue_fail_htlc`] for more info.
2899 pub fn queue_fail_malformed_htlc<L: Deref>(
2900 &mut self, htlc_id_arg: u64, failure_code: u16, sha256_of_onion: [u8; 32], logger: &L
2901 ) -> Result<(), ChannelError> where L::Target: Logger {
// As in `queue_fail_htlc`, forcing the holding cell (`true`) means no immediate
// message should ever be returned here.
2902 self.fail_htlc(htlc_id_arg, (sha256_of_onion, failure_code), true, logger)
2903 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
2906 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2907 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2908 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2909 /// before we fail backwards.
2911 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2912 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2913 /// [`ChannelError::Ignore`].
2914 fn fail_htlc<L: Deref, E: FailHTLCContents + Clone>(
2915 &mut self, htlc_id_arg: u64, err_contents: E, mut force_holding_cell: bool,
2917 ) -> Result<Option<E::Message>, ChannelError> where L::Target: Logger {
// Failing an HTLC on a non-operational channel is a programming error, not a peer error.
2918 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
2919 panic!("Was asked to fail an HTLC when channel was not in an operational state");
2922 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
2923 // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
2924 // these, but for now we just have to treat them as normal.
// Locate the pending inbound HTLC matching the given id; `usize::MAX` acts as the
// "not found" sentinel checked below.
2926 let mut pending_idx = core::usize::MAX;
2927 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
2928 if htlc.htlc_id == htlc_id_arg {
2930 InboundHTLCState::Committed => {},
2931 InboundHTLCState::LocalRemoved(ref reason) => {
2932 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
2934 debug_assert!(false, "Tried to fail an HTLC that was already failed");
2939 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2940 return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
2946 if pending_idx == core::usize::MAX {
2947 #[cfg(any(test, fuzzing))]
2948 // If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
2949 // is simply a duplicate fail, not previously failed and we failed-back too early.
2950 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
// If channel state forbids sending update messages right now, the failure must go
// into the holding cell regardless of what the caller requested.
2954 if self.context.channel_state.should_force_holding_cell() {
2955 debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
2956 force_holding_cell = true;
2959 // Now update local state:
2960 if force_holding_cell {
// Scan the holding cell for an existing resolution of this HTLC — a prior claim is a
// tolerable duplicate, a prior fail is a bug (see doc comment above).
2961 for pending_update in self.context.holding_cell_htlc_updates.iter() {
2962 match pending_update {
2963 &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2964 if htlc_id_arg == htlc_id {
2965 #[cfg(any(test, fuzzing))]
2966 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2970 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
2971 &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
2973 if htlc_id_arg == htlc_id {
2974 debug_assert!(false, "Tried to fail an HTLC that was already failed");
2975 return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
2981 log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id());
2982 self.context.holding_cell_htlc_updates.push(err_contents.to_htlc_update_awaiting_ack(htlc_id_arg));
2986 log_trace!(logger, "Failing HTLC ID {} back with {} message in channel {}.", htlc_id_arg,
2987 E::Message::name(), &self.context.channel_id());
// Mark the HTLC locally-removed and hand the caller the wire message to send.
2989 let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
2990 htlc.state = err_contents.clone().to_inbound_htlc_state();
2993 Ok(Some(err_contents.to_message(htlc_id_arg, self.context.channel_id())))
2996 // Message handlers:
2997 /// Updates the state of the channel to indicate that all channels in the batch have received
2998 /// funding_signed and persisted their monitors.
2999 /// The funding transaction is consequently allowed to be broadcast, and the channel can be
3000 /// treated as a non-batch channel going forward.
3001 pub fn set_batch_ready(&mut self) {
// Drop the batch marker and clear the WAITING_FOR_BATCH state flag.
3002 self.context.is_batch_funding = None;
3003 self.context.channel_state.clear_waiting_for_batch();
3006 /// Unsets the existing funding information.
3008 /// This must only be used if the channel has not yet completed funding and has not been used.
3010 /// Further, the channel must be immediately shut down after this with a call to
3011 /// [`ChannelContext::force_shutdown`].
3012 pub fn unset_funding_info(&mut self, temporary_channel_id: ChannelId) {
// Only valid while still awaiting channel_ready — i.e. funding is not yet locked in.
3013 debug_assert!(matches!(
3014 self.context.channel_state, ChannelState::AwaitingChannelReady(_)
// Clear the funding outpoint and revert to the pre-funding (temporary) channel id.
3016 self.context.channel_transaction_parameters.funding_outpoint = None;
3017 self.context.channel_id = temporary_channel_id;
3020 /// Handles a channel_ready message from our peer. If we've already sent our channel_ready
3021 /// and the channel is now usable (and public), this may generate an announcement_signatures to
3023 pub fn channel_ready<NS: Deref, L: Deref>(
3024 &mut self, msg: &msgs::ChannelReady, node_signer: &NS, chain_hash: ChainHash,
3025 user_config: &UserConfig, best_block: &BestBlock, logger: &L
3026 ) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
3028 NS::Target: NodeSigner,
// Stash the message for lnd's bug 4006 workaround — they may send channel_ready before
// channel_reestablish on reconnect; we replay it once reestablish completes.
3031 if self.context.channel_state.is_peer_disconnected() {
3032 self.context.workaround_lnd_bug_4006 = Some(msg.clone());
3033 return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
3036 if let Some(scid_alias) = msg.short_channel_id_alias {
3037 if Some(scid_alias) != self.context.short_channel_id {
3038 // The scid alias provided can be used to route payments *from* our counterparty,
3039 // i.e. can be used for inbound payments and provided in invoices, but is not used
3040 // when routing outbound payments.
3041 self.context.latest_inbound_scid_alias = Some(scid_alias);
3045 // Our channel_ready shouldn't have been sent if we are waiting for other channels in the
3046 // batch, but we can receive channel_ready messages.
3047 let mut check_reconnection = false;
3048 match &self.context.channel_state {
3049 ChannelState::AwaitingChannelReady(flags) => {
// Mask off the funded-state flags so we only reason about the AwaitingChannelReady-specific bits.
3050 let flags = *flags & !FundedStateFlags::ALL;
3051 debug_assert!(!flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY) || !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
3052 if flags & !AwaitingChannelReadyFlags::WAITING_FOR_BATCH == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY {
3053 // If we reconnected before sending our `channel_ready` they may still resend theirs.
3054 check_reconnection = true;
3055 } else if (flags & !AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty() {
3056 self.context.channel_state.set_their_channel_ready();
3057 } else if flags == AwaitingChannelReadyFlags::OUR_CHANNEL_READY {
// Both sides have now sent channel_ready: the channel becomes operational.
3058 self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
3059 self.context.update_time_counter += 1;
3061 // We're in `WAITING_FOR_BATCH`, so we should wait until we're ready.
3062 debug_assert!(flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
3065 // If we reconnected before sending our `channel_ready` they may still resend theirs.
3066 ChannelState::ChannelReady(_) => check_reconnection = true,
3067 _ => return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned())),
3069 if check_reconnection {
3070 // They probably disconnected/reconnected and re-sent the channel_ready, which is
3071 // required, or they're sending a fresh SCID alias.
3072 let expected_point =
3073 if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
3074 // If they haven't ever sent an updated point, the point they send should match
3076 self.context.counterparty_cur_commitment_point
3077 } else if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
3078 // If we've advanced the commitment number once, the second commitment point is
3079 // at `counterparty_prev_commitment_point`, which is not yet revoked.
3080 debug_assert!(self.context.counterparty_prev_commitment_point.is_some());
3081 self.context.counterparty_prev_commitment_point
3083 // If they have sent updated points, channel_ready is always supposed to match
3084 // their "first" point, which we re-derive here.
3085 Some(PublicKey::from_secret_key(&self.context.secp_ctx, &SecretKey::from_slice(
3086 &self.context.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
3087 ).expect("We already advanced, so previous secret keys should have been validated already")))
3089 if expected_point != Some(msg.next_per_commitment_point) {
3090 return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
// Advance our record of the counterparty's commitment points with the newly provided one.
3095 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
3096 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
3098 log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id());
3100 Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger))
/// Handles an inbound `update_add_htlc` message: validates it against our channel state,
/// HTLC count/value limits, dust-exposure caps and reserve requirements, then records the new
/// HTLC in `pending_inbound_htlcs`. Violations that are the peer's fault return
/// `ChannelError::Close`; conditions we merely dislike downgrade `pending_forward_status`
/// to a failure via `create_pending_htlc_status` instead.
3103 pub fn update_add_htlc<F, FE: Deref, L: Deref>(
3104 &mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
3105 create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
3106 ) -> Result<(), ChannelError>
3107 where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
3108 FE::Target: FeeEstimator, L::Target: Logger,
3110 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3111 return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
3113 // We can't accept HTLCs sent after we've sent a shutdown.
3114 if self.context.channel_state.is_local_shutdown_sent() {
3115 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
3117 // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
3118 if self.context.channel_state.is_remote_shutdown_sent() {
3119 return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
3121 if self.context.channel_state.is_peer_disconnected() {
3122 return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
// Basic sanity checks on the HTLC amount itself.
3124 if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
3125 return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
3127 if msg.amount_msat == 0 {
3128 return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
3130 if msg.amount_msat < self.context.holder_htlc_minimum_msat {
3131 return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
// Enforce our advertised max-accepted-HTLC count and max in-flight value.
3134 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
3135 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
3136 if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
3137 return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
3139 if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
3140 return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
3143 // Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
3144 // the reserve_satoshis we told them to always have as direct payment so that they lose
3145 // something if we punish them for broadcasting an old state).
3146 // Note that we don't really care about having a small/no to_remote output in our local
3147 // commitment transactions, as the purpose of the channel reserve is to ensure we can
3148 // punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
3149 // present in the next commitment transaction we send them (at least for fulfilled ones,
3150 // failed ones won't modify value_to_self).
3151 // Note that we will send HTLCs which another instance of rust-lightning would think
3152 // violate the reserve value if we do not do this (as we forget inbound HTLCs from the
3153 // Channel state once they will not be present in the next received commitment
3155 let mut removed_outbound_total_msat = 0;
3156 for ref htlc in self.context.pending_outbound_htlcs.iter() {
3157 if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
3158 removed_outbound_total_msat += htlc.amount_msat;
3159 } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
3160 removed_outbound_total_msat += htlc.amount_msat;
// Dust-exposure checks: if this HTLC would land below the (fee-adjusted) dust limit on
// either commitment transaction, it adds to our dust exposure; fail it back (0x1000|7)
// rather than accept exposure above our configured cap.
3164 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
3165 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3168 let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
3169 (dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
3170 dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
3172 let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
3173 if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
3174 let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
3175 if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
3176 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
3177 on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
3178 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
3182 let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
3183 if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
3184 let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
3185 if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
3186 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
3187 on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
3188 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
// Compute the counterparty's remaining balance after all pending HTLCs, discounting the
// fulfilled outbound HTLCs tallied above.
3192 let pending_value_to_self_msat =
3193 self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
3194 let pending_remote_value_msat =
3195 self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
3196 if pending_remote_value_msat < msg.amount_msat {
3197 return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
3200 // Check that the remote can afford to pay for this HTLC on-chain at the current
3201 // feerate_per_kw, while maintaining their channel reserve (as required by the spec).
3203 let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else {
3204 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3205 self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
3207 let anchor_outputs_value_msat = if !self.context.is_outbound() && self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3208 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
3212 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(anchor_outputs_value_msat) < remote_commit_tx_fee_msat {
3213 return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
3215 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(remote_commit_tx_fee_msat).saturating_sub(anchor_outputs_value_msat) < self.context.holder_selected_channel_reserve_satoshis * 1000 {
3216 return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
3220 let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3221 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
3225 if !self.context.is_outbound() {
3226 // `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
3227 // the spec because the fee spike buffer requirement doesn't exist on the receiver's
3228 // side, only on the sender's. Note that with anchor outputs we are no longer as
3229 // sensitive to fee spikes, so we need to account for them.
3230 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3231 let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
3232 if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3233 remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
3235 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
3236 // Note that if the pending_forward_status is not updated here, then it's because we're already failing
3237 // the HTLC, i.e. its status is already set to failing.
3238 log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
3239 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
3242 // Check that they won't violate our local required channel reserve by adding this HTLC.
3243 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3244 let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
3245 if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat + anchor_outputs_value_msat {
3246 return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
// HTLC ids must be assigned strictly sequentially by the sender.
3249 if self.context.next_counterparty_htlc_id != msg.htlc_id {
3250 return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
3252 if msg.cltv_expiry >= 500000000 {
3253 return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
3256 if self.context.channel_state.is_local_shutdown_sent() {
3257 if let PendingHTLCStatus::Forward(_) = pending_forward_status {
3258 panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
3262 // Now update local state:
3263 self.context.next_counterparty_htlc_id += 1;
3264 self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
3265 htlc_id: msg.htlc_id,
3266 amount_msat: msg.amount_msat,
3267 payment_hash: msg.payment_hash,
3268 cltv_expiry: msg.cltv_expiry,
3269 state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
3274 /// Marks an outbound HTLC which we have received update_fail/fulfill/malformed
// Exactly one of `check_preimage` (fulfill path) or `fail_reason` (fail path) may be set;
// on success, returns a reference to the HTLC that was just transitioned to RemoteRemoved.
3276 fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
3277 assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage");
3278 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3279 if htlc.htlc_id == htlc_id {
3280 let outcome = match check_preimage {
3281 None => fail_reason.into(),
3282 Some(payment_preimage) => {
// On fulfill, verify the claimed preimage actually hashes to this HTLC's payment hash.
3283 let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array());
3284 if payment_hash != htlc.payment_hash {
3285 return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
3287 OutboundHTLCOutcome::Success(Some(payment_preimage))
// Only a fully-committed HTLC may be resolved by the remote; resolving one that was
// never committed, or resolving twice, is a protocol violation.
3291 OutboundHTLCState::LocalAnnounced(_) =>
3292 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
3293 OutboundHTLCState::Committed => {
3294 htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
3296 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
3297 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
3302 Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
/// Handles an inbound `update_fulfill_htlc` message, returning the source and amount (msat)
/// of the removed outbound HTLC on success.
3305 pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
3306 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3307 return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
3309 if self.context.channel_state.is_peer_disconnected() {
3310 return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
// Delegate preimage validation and the state transition to the shared helper.
3313 self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
/// Handles an inbound `update_fail_htlc` message, marking the referenced outbound HTLC as
/// remote-removed with the given failure reason.
3316 pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
3317 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3318 return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
3320 if self.context.channel_state.is_peer_disconnected() {
3321 return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
3324 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
/// Handles an inbound `update_fail_malformed_htlc` message, marking the referenced outbound
/// HTLC as remote-removed with the given failure reason.
3328 pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
3329 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3330 return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
3332 if self.context.channel_state.is_peer_disconnected() {
3333 return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
3336 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
3340 pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
3341 where L::Target: Logger
3343 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3344 return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
3346 if self.context.channel_state.is_peer_disconnected() {
3347 return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
3349 if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
3350 return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
3353 let funding_script = self.context.get_funding_redeemscript();
3355 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
3357 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
3358 let commitment_txid = {
3359 let trusted_tx = commitment_stats.tx.trust();
3360 let bitcoin_tx = trusted_tx.built_transaction();
3361 let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
3363 log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
3364 log_bytes!(msg.signature.serialize_compact()[..]),
3365 log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
3366 log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), &self.context.channel_id());
3367 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
3368 return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
3372 let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();
3374 // If our counterparty updated the channel fee in this commitment transaction, check that
3375 // they can actually afford the new fee now.
3376 let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee {
3377 update_state == FeeUpdateState::RemoteAnnounced
3380 debug_assert!(!self.context.is_outbound());
3381 let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
3382 if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
3383 return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
3386 #[cfg(any(test, fuzzing))]
3388 if self.context.is_outbound() {
3389 let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
3390 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
3391 if let Some(info) = projected_commit_tx_info {
3392 let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len()
3393 + self.context.holding_cell_htlc_updates.len();
3394 if info.total_pending_htlcs == total_pending_htlcs
3395 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
3396 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
3397 && info.feerate == self.context.feerate_per_kw {
3398 assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);
3404 if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
3405 return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
3408 // Up to LDK 0.0.115, HTLC information was required to be duplicated in the
3409 // `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
3410 // in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
3411 // outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for
3412 // backwards compatibility, we never use it in production. To provide test coverage, here,
3413 // we randomly decide (in test/fuzzing builds) to use the new vec sometimes.
3414 #[allow(unused_assignments, unused_mut)]
3415 let mut separate_nondust_htlc_sources = false;
3416 #[cfg(all(feature = "std", any(test, fuzzing)))] {
3417 use core::hash::{BuildHasher, Hasher};
3418 // Get a random value using the only std API to do so - the DefaultHasher
3419 let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
3420 separate_nondust_htlc_sources = rand_val % 2 == 0;
3423 let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
3424 let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
3425 for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
3426 if let Some(_) = htlc.transaction_output_index {
3427 let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
3428 self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, &self.context.channel_type,
3429 &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
3431 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &keys);
3432 let htlc_sighashtype = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
3433 let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
3434 log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
3435 log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.to_public_key().serialize()),
3436 encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), &self.context.channel_id());
3437 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key.to_public_key()) {
3438 return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
3440 if !separate_nondust_htlc_sources {
3441 htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
3444 htlcs_and_sigs.push((htlc, None, source_opt.take()));
3446 if separate_nondust_htlc_sources {
3447 if let Some(source) = source_opt.take() {
3448 nondust_htlc_sources.push(source);
3451 debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
3454 let holder_commitment_tx = HolderCommitmentTransaction::new(
3455 commitment_stats.tx,
3457 msg.htlc_signatures.clone(),
3458 &self.context.get_holder_pubkeys().funding_pubkey,
3459 self.context.counterparty_funding_pubkey()
3462 self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.outbound_htlc_preimages)
3463 .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
3465 // Update state now that we've passed all the can-fail calls...
3466 let mut need_commitment = false;
3467 if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee {
3468 if *update_state == FeeUpdateState::RemoteAnnounced {
3469 *update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
3470 need_commitment = true;
3474 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
3475 let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
3476 Some(forward_info.clone())
3478 if let Some(forward_info) = new_forward {
3479 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
3480 &htlc.payment_hash, &self.context.channel_id);
3481 htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
3482 need_commitment = true;
3485 let mut claimed_htlcs = Vec::new();
3486 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3487 if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
3488 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
3489 &htlc.payment_hash, &self.context.channel_id);
3490 // Grab the preimage, if it exists, instead of cloning
3491 let mut reason = OutboundHTLCOutcome::Success(None);
3492 mem::swap(outcome, &mut reason);
3493 if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
3494 // If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
3495 // upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
3496 // have a `Success(None)` reason. In this case we could forget some HTLC
3497 // claims, but such an upgrade is unlikely and including claimed HTLCs here
3498 // fixes a bug which the user was exposed to on 0.0.104 when they started the
3500 claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
3502 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
3503 need_commitment = true;
3507 self.context.latest_monitor_update_id += 1;
3508 let mut monitor_update = ChannelMonitorUpdate {
3509 update_id: self.context.latest_monitor_update_id,
3510 counterparty_node_id: Some(self.context.counterparty_node_id),
3511 updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
3512 commitment_tx: holder_commitment_tx,
3513 htlc_outputs: htlcs_and_sigs,
3515 nondust_htlc_sources,
3519 self.context.cur_holder_commitment_transaction_number -= 1;
3520 self.context.expecting_peer_commitment_signed = false;
3521 // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
3522 // build_commitment_no_status_check() next which will reset this to RAAFirst.
3523 self.context.resend_order = RAACommitmentOrder::CommitmentFirst;
3525 if self.context.channel_state.is_monitor_update_in_progress() {
3526 // In case we initially failed monitor updating without requiring a response, we need
3527 // to make sure the RAA gets sent first.
3528 self.context.monitor_pending_revoke_and_ack = true;
3529 if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
3530 // If we were going to send a commitment_signed after the RAA, go ahead and do all
3531 // the corresponding HTLC status updates so that
3532 // get_last_commitment_update_for_send includes the right HTLCs.
3533 self.context.monitor_pending_commitment_signed = true;
3534 let mut additional_update = self.build_commitment_no_status_check(logger);
3535 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3536 // strictly increasing by one, so decrement it here.
3537 self.context.latest_monitor_update_id = monitor_update.update_id;
3538 monitor_update.updates.append(&mut additional_update.updates);
3540 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
3541 &self.context.channel_id);
3542 return Ok(self.push_ret_blockable_mon_update(monitor_update));
3545 let need_commitment_signed = if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
3546 // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
3547 // we'll send one right away when we get the revoke_and_ack when we
3548 // free_holding_cell_htlcs().
3549 let mut additional_update = self.build_commitment_no_status_check(logger);
3550 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3551 // strictly increasing by one, so decrement it here.
3552 self.context.latest_monitor_update_id = monitor_update.update_id;
3553 monitor_update.updates.append(&mut additional_update.updates);
3557 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
3558 &self.context.channel_id(), if need_commitment_signed { " our own commitment_signed and" } else { "" });
3559 self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
3560 return Ok(self.push_ret_blockable_mon_update(monitor_update));
3563 /// Public version of the below, checking relevant preconditions first.
3564 /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
3565 /// returns `(None, Vec::new())`.
3566 pub fn maybe_free_holding_cell_htlcs<F: Deref, L: Deref>(
3567 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3568 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3569 where F::Target: FeeEstimator, L::Target: Logger
// No-op unless the channel is fully operational (`ChannelReady`) and the current
// channel state does not force new updates to remain in the holding cell.
3571 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && !self.context.channel_state.should_force_holding_cell() {
3572 self.free_holding_cell_htlcs(fee_estimator, logger)
3573 } else { (None, Vec::new()) }
3576 /// Frees any pending commitment updates in the holding cell, generating the relevant messages
3577 /// for our counterparty.
// Returns the merged `ChannelMonitorUpdate` covering everything freed (if anything
// was generated) plus the list of HTLCs which could not be sent and must be failed
// backwards by the caller.
3578 fn free_holding_cell_htlcs<F: Deref, L: Deref>(
3579 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3580 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3581 where F::Target: FeeEstimator, L::Target: Logger
// Callers (e.g. `maybe_free_holding_cell_htlcs`) must have already ensured no
// monitor update is in flight - generating further updates now would break the
// strictly-increasing update_id ordering.
3583 assert!(!self.context.channel_state.is_monitor_update_in_progress());
3584 if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
3585 log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
3586 if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id());
3588 let mut monitor_update = ChannelMonitorUpdate {
3589 update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
3590 counterparty_node_id: Some(self.context.counterparty_node_id),
3591 updates: Vec::new(),
// Take ownership of the queued updates so we can iterate them while still calling
// `&mut self` methods below.
3594 let mut htlc_updates = Vec::new();
3595 mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
3596 let mut update_add_count = 0;
3597 let mut update_fulfill_count = 0;
3598 let mut update_fail_count = 0;
3599 let mut htlcs_to_fail = Vec::new();
3600 for htlc_update in htlc_updates.drain(..) {
3601 // Note that this *can* fail, though it should be due to rather-rare conditions on
3602 // fee races with adding too many outputs which push our total payments just over
3603 // the limit. In case it's less rare than I anticipate, we may want to revisit
3604 // handling this case better and maybe fulfilling some of the HTLCs while attempting
3605 // to rebalance channels.
3606 let fail_htlc_res = match &htlc_update {
3607 &HTLCUpdateAwaitingACK::AddHTLC {
3608 amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
3609 skimmed_fee_msat, blinding_point, ..
3611 match self.send_htlc(
3612 amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(),
3613 false, skimmed_fee_msat, blinding_point, fee_estimator, logger
3615 Ok(_) => update_add_count += 1,
3618 ChannelError::Ignore(ref msg) => {
3619 log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", &payment_hash, msg, &self.context.channel_id());
3620 // If we fail to send here, then this HTLC should
3621 // be failed backwards. Failing to send here
3622 // indicates that this HTLC may keep being put back
3623 // into the holding cell without ever being
3624 // successfully forwarded/failed/fulfilled, causing
3625 // our counterparty to eventually close on us.
3626 htlcs_to_fail.push((source.clone(), *payment_hash));
3629 panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
3636 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
3637 // If an HTLC claim was previously added to the holding cell (via
3638 // `get_update_fulfill_htlc`, then generating the claim message itself must
3639 // not fail - any in between attempts to claim the HTLC will have resulted
3640 // in it hitting the holding cell again and we cannot change the state of a
3641 // holding cell HTLC from fulfill to anything else.
3642 let mut additional_monitor_update =
3643 if let UpdateFulfillFetch::NewClaim { monitor_update, .. } =
3644 self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger)
3645 { monitor_update } else { unreachable!() };
3646 update_fulfill_count += 1;
3647 monitor_update.updates.append(&mut additional_monitor_update.updates);
3650 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
3651 Some(self.fail_htlc(htlc_id, err_packet.clone(), false, logger)
3652 .map(|fail_msg_opt| fail_msg_opt.map(|_| ())))
3654 &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
3655 Some(self.fail_htlc(htlc_id, (sha256_of_onion, failure_code), false, logger)
3656 .map(|fail_msg_opt| fail_msg_opt.map(|_| ())))
3659 if let Some(res) = fail_htlc_res {
3661 Ok(fail_msg_opt) => {
3662 // If an HTLC failure was previously added to the holding cell (via
3663 // `queue_fail_{malformed_}htlc`) then generating the fail message itself must
3664 // not fail - we should never end up in a state where we double-fail
3665 // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
3666 // for a full revocation before failing.
3667 debug_assert!(fail_msg_opt.is_some());
3668 update_fail_count += 1;
3670 Err(ChannelError::Ignore(_)) => {},
3672 panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
// If nothing was generated (and no fee update is queued) there is no commitment
// update to build, but we still hand back the HTLCs which must be failed backwards.
3677 if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && self.context.holding_cell_update_fee.is_none() {
3678 return (None, htlcs_to_fail);
3680 let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
3681 self.send_update_fee(feerate, false, fee_estimator, logger)
// Build the new commitment covering everything freed above and merge its monitor
// update into ours, keeping a single update with one id.
3686 let mut additional_update = self.build_commitment_no_status_check(logger);
3687 // build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_id
3688 // but we want them to be strictly increasing by one, so reset it here.
3689 self.context.latest_monitor_update_id = monitor_update.update_id;
3690 monitor_update.updates.append(&mut additional_update.updates);
3692 log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
3693 &self.context.channel_id(), if update_fee.is_some() { "a fee update, " } else { "" },
3694 update_add_count, update_fulfill_count, update_fail_count);
3696 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
3697 (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
3703 /// Handles receiving a remote's revoke_and_ack. Note that we may return a new
3704 /// commitment_signed message here in case we had pending outbound HTLCs to add which were
3705 /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
3706 /// generating an appropriate error *after* the channel state has been updated based on the
3707 /// revoke_and_ack message.
3708 pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK,
3709 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L, hold_mon_update: bool,
3710 ) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
3711 where F::Target: FeeEstimator, L::Target: Logger,
// The channel must be fully operational (and the peer connected) before we can
// process a revocation; all can-fail checks happen before any state mutation.
3713 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3714 return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
3716 if self.context.channel_state.is_peer_disconnected() {
3717 return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
3719 if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
3720 return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
// The revealed per-commitment secret must parse as a valid secp256k1 secret key...
3723 let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());
// ...and, when we know their previous commitment point, must actually derive it.
3725 if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
3726 if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
3727 return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
3731 if !self.context.channel_state.is_awaiting_remote_revoke() {
3732 // Our counterparty seems to have burned their coins to us (by revoking a state when we
3733 // haven't given them a new commitment transaction to broadcast). We should probably
3734 // take advantage of this by updating our channel monitor, sending them an error, and
3735 // waiting for them to broadcast their latest (now-revoked claim). But, that would be a
3736 // lot of work, and there's some chance this is all a misunderstanding anyway.
3737 // We have to do *something*, though, since our signer may get mad at us for otherwise
3738 // jumping a remote commitment number, so best to just force-close and move on.
3739 return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
// (test/fuzzing only) drop the cached next-commitment fee info now that the state
// machine is stepping forward.
3742 #[cfg(any(test, fuzzing))]
3744 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
3745 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
3748 match &self.context.holder_signer {
3749 ChannelSignerType::Ecdsa(ecdsa) => {
3750 ecdsa.validate_counterparty_revocation(
3751 self.context.cur_counterparty_commitment_transaction_number + 1,
3753 ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
3755 // TODO (taproot|arik)
// Store the revoked secret; this fails if it is inconsistent with the secrets the
// peer previously revealed.
3760 self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
3761 .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
3762 self.context.latest_monitor_update_id += 1;
3763 let mut monitor_update = ChannelMonitorUpdate {
3764 update_id: self.context.latest_monitor_update_id,
3765 counterparty_node_id: Some(self.context.counterparty_node_id),
3766 updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
3767 idx: self.context.cur_counterparty_commitment_transaction_number + 1,
3768 secret: msg.per_commitment_secret,
3772 // Update state now that we've passed all the can-fail calls...
3773 // (note that we may still fail to generate the new commitment_signed message, but that's
3774 // OK, we step the channel here and *then* if the new generation fails we can fail the
3775 // channel based on that, but stepping stuff here should be safe either way.
3776 self.context.channel_state.clear_awaiting_remote_revoke();
3777 self.context.sent_message_awaiting_response = None;
3778 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
3779 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
3780 self.context.cur_counterparty_commitment_transaction_number -= 1;
3782 if self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
3783 self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
3786 log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id());
3787 let mut to_forward_infos = Vec::new();
3788 let mut revoked_htlcs = Vec::new();
3789 let mut finalized_claimed_htlcs = Vec::new();
3790 let mut update_fail_htlcs = Vec::new();
3791 let mut update_fail_malformed_htlcs = Vec::new();
3792 let mut require_commitment = false;
3793 let mut value_to_self_msat_diff: i64 = 0;
3796 // Take references explicitly so that we can hold multiple references to self.context.
3797 let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
3798 let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;
3799 let expecting_peer_commitment_signed = &mut self.context.expecting_peer_commitment_signed;
3801 // We really shouldnt have two passes here, but retain gives a non-mutable ref (Rust bug)
3802 pending_inbound_htlcs.retain(|htlc| {
3803 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
3804 log_trace!(logger, " ...removing inbound LocalRemoved {}", &htlc.payment_hash);
3805 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
3806 value_to_self_msat_diff += htlc.amount_msat as i64;
3808 *expecting_peer_commitment_signed = true;
3812 pending_outbound_htlcs.retain(|htlc| {
3813 if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
3814 log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", &htlc.payment_hash);
3815 if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
3816 revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
3818 finalized_claimed_htlcs.push(htlc.source.clone());
3819 // They fulfilled, so we sent them money
3820 value_to_self_msat_diff -= htlc.amount_msat as i64;
3825 for htlc in pending_inbound_htlcs.iter_mut() {
3826 let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
3828 } else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
// Swap the state out so we can move its `forward_info` by value below.
3832 let mut state = InboundHTLCState::Committed;
3833 mem::swap(&mut state, &mut htlc.state);
3835 if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
3836 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
3837 htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
3838 require_commitment = true;
3839 } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
3840 match forward_info {
3841 PendingHTLCStatus::Fail(fail_msg) => {
3842 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash);
3843 require_commitment = true;
3845 HTLCFailureMsg::Relay(msg) => {
3846 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
3847 update_fail_htlcs.push(msg)
3849 HTLCFailureMsg::Malformed(msg) => {
3850 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
3851 update_fail_malformed_htlcs.push(msg)
3855 PendingHTLCStatus::Forward(forward_info) => {
3856 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash);
3857 to_forward_infos.push((forward_info, htlc.htlc_id));
3858 htlc.state = InboundHTLCState::Committed;
3864 for htlc in pending_outbound_htlcs.iter_mut() {
3865 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
3866 log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", &htlc.payment_hash);
3867 htlc.state = OutboundHTLCState::Committed;
3868 *expecting_peer_commitment_signed = true;
3870 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
3871 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
3872 // Grab the preimage, if it exists, instead of cloning
3873 let mut reason = OutboundHTLCOutcome::Success(None);
3874 mem::swap(outcome, &mut reason);
3875 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
3876 require_commitment = true;
3880 self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;
3882 if let Some((feerate, update_state)) = self.context.pending_update_fee {
3883 match update_state {
3884 FeeUpdateState::Outbound => {
3885 debug_assert!(self.context.is_outbound());
3886 log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
3887 self.context.feerate_per_kw = feerate;
3888 self.context.pending_update_fee = None;
3889 self.context.expecting_peer_commitment_signed = true;
3891 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); },
3892 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
3893 debug_assert!(!self.context.is_outbound());
3894 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
3895 require_commitment = true;
3896 self.context.feerate_per_kw = feerate;
3897 self.context.pending_update_fee = None;
// Decide whether the monitor update may be handed to the caller now, or must be
// queued: either behind previously-blocked updates, or because the caller
// explicitly asked us to hold it via `hold_mon_update`.
3902 let release_monitor = self.context.blocked_monitor_updates.is_empty() && !hold_mon_update;
3903 let release_state_str =
3904 if hold_mon_update { "Holding" } else if release_monitor { "Releasing" } else { "Blocked" };
// Every return path below goes through this macro so the monitor update is either
// returned to the caller or pushed onto `blocked_monitor_updates`, never dropped.
3905 macro_rules! return_with_htlcs_to_fail {
3906 ($htlcs_to_fail: expr) => {
3907 if !release_monitor {
3908 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
3909 update: monitor_update,
3911 return Ok(($htlcs_to_fail, None));
3913 return Ok(($htlcs_to_fail, Some(monitor_update)));
3918 if self.context.channel_state.is_monitor_update_in_progress() {
3919 // We can't actually generate a new commitment transaction (incl by freeing holding
3920 // cells) while we can't update the monitor, so we just return what we have.
3921 if require_commitment {
3922 self.context.monitor_pending_commitment_signed = true;
3923 // When the monitor updating is restored we'll call
3924 // get_last_commitment_update_for_send(), which does not update state, but we're
3925 // definitely now awaiting a remote revoke before we can step forward any more, so
3927 let mut additional_update = self.build_commitment_no_status_check(logger);
3928 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3929 // strictly increasing by one, so decrement it here.
3930 self.context.latest_monitor_update_id = monitor_update.update_id;
3931 monitor_update.updates.append(&mut additional_update.updates);
3933 self.context.monitor_pending_forwards.append(&mut to_forward_infos);
3934 self.context.monitor_pending_failures.append(&mut revoked_htlcs);
3935 self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
3936 log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", &self.context.channel_id());
3937 return_with_htlcs_to_fail!(Vec::new());
// Now that the peer has revoked, try to free any holding-cell updates; this may
// itself produce an additional monitor update which we merge into ours.
3940 match self.free_holding_cell_htlcs(fee_estimator, logger) {
3941 (Some(mut additional_update), htlcs_to_fail) => {
3942 // free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be
3943 // strictly increasing by one, so decrement it here.
3944 self.context.latest_monitor_update_id = monitor_update.update_id;
3945 monitor_update.updates.append(&mut additional_update.updates);
3947 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with holding cell HTLCs freed. {} monitor update.",
3948 &self.context.channel_id(), release_state_str);
3950 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3951 return_with_htlcs_to_fail!(htlcs_to_fail);
3953 (None, htlcs_to_fail) => {
3954 if require_commitment {
3955 let mut additional_update = self.build_commitment_no_status_check(logger);
3957 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3958 // strictly increasing by one, so decrement it here.
3959 self.context.latest_monitor_update_id = monitor_update.update_id;
3960 monitor_update.updates.append(&mut additional_update.updates);
3962 log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed. {} monitor update.",
3963 &self.context.channel_id(),
3964 update_fail_htlcs.len() + update_fail_malformed_htlcs.len(),
3967 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3968 return_with_htlcs_to_fail!(htlcs_to_fail);
3970 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary. {} monitor update.",
3971 &self.context.channel_id(), release_state_str);
3973 self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3974 return_with_htlcs_to_fail!(htlcs_to_fail);
3980 /// Queues up an outbound update fee by placing it in the holding cell. You should call
3981 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
3982 /// commitment update.
3983 pub fn queue_update_fee<F: Deref, L: Deref>(&mut self, feerate_per_kw: u32,
3984 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
3985 where F::Target: FeeEstimator, L::Target: Logger
// We pass `force_holding_cell = true`, so `send_update_fee` must queue the fee
// update rather than returning an `update_fee` message to send immediately.
3987 let msg_opt = self.send_update_fee(feerate_per_kw, true, fee_estimator, logger);
3988 assert!(msg_opt.is_none(), "We forced holding cell?");
3991 /// Adds a pending update to this channel. See the doc for send_htlc for
3992 /// further details on the optionness of the return value.
3993 /// If our balance is too low to cover the cost of the next commitment transaction at the
3994 /// new feerate, the update is cancelled.
3996 /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
3997 /// [`Channel`] if `force_holding_cell` is false.
3998 fn send_update_fee<F: Deref, L: Deref>(
3999 &mut self, feerate_per_kw: u32, mut force_holding_cell: bool,
4000 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
4001 ) -> Option<msgs::UpdateFee>
4002 where F::Target: FeeEstimator, L::Target: Logger
// Fee updates may only originate from the funder (outbound channel), and only on a
// usable, live channel - violations are caller bugs, hence panics.
4004 if !self.context.is_outbound() {
4005 panic!("Cannot send fee from inbound channel");
4007 if !self.context.is_usable() {
4008 panic!("Cannot update fee until channel is fully established and we haven't started shutting down");
4010 if !self.context.is_live() {
4011 panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
4014 // Before proposing a feerate update, check that we can actually afford the new fee.
4015 let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
4016 let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
4017 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
4018 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
// Budget the commitment fee at the proposed rate over the current non-dust HTLCs,
// any holding-cell HTLCs, plus `CONCURRENT_INBOUND_HTLC_FEE_BUFFER` extra slots.
4019 let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000;
4020 let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
4021 if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
4022 //TODO: auto-close after a number of failures?
4023 log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
4027 // Note, we evaluate pending htlc "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
4028 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
4029 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
4030 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
4031 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
4032 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
4035 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
4036 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
// If we cannot send new channel state right now (awaiting their revoke_and_ack or a
// monitor update), stash the fee update in the holding cell instead of sending it.
4040 if self.context.channel_state.is_awaiting_remote_revoke() || self.context.channel_state.is_monitor_update_in_progress() {
4041 force_holding_cell = true;
4044 if force_holding_cell {
4045 self.context.holding_cell_update_fee = Some(feerate_per_kw);
4049 debug_assert!(self.context.pending_update_fee.is_none());
4050 self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));
4052 Some(msgs::UpdateFee {
4053 channel_id: self.context.channel_id,
4058 /// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
4059 /// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be
4061 /// No further message handling calls may be made until a channel_reestablish dance has
4063 /// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately.
4064 pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger {
4065 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
4066 if self.context.channel_state.is_pre_funded_state() {
4070 if self.context.channel_state.is_peer_disconnected() {
4071 // While the below code should be idempotent, it's simpler to just return early, as
4072 // redundant disconnect events can fire, though they should be rare.
4076 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
4077 self.context.announcement_sigs_state = AnnouncementSigsState::NotSent;
4080 // Upon reconnect we have to start the closing_signed dance over, but shutdown messages
4081 // will be retransmitted.
4082 self.context.last_sent_closing_fee = None;
4083 self.context.pending_counterparty_closing_signed = None;
4084 self.context.closing_fee_limits = None;
// Drop inbound HTLCs the peer announced but never committed; everything else stays.
4086 let mut inbound_drop_count = 0;
4087 self.context.pending_inbound_htlcs.retain(|htlc| {
4089 InboundHTLCState::RemoteAnnounced(_) => {
4090 // They sent us an update_add_htlc but we never got the commitment_signed.
4091 // We'll tell them what commitment_signed we're expecting next and they'll drop
4092 // this HTLC accordingly
4093 inbound_drop_count += 1;
4096 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
4097 // We received a commitment_signed updating this HTLC and (at least hopefully)
4098 // sent a revoke_and_ack (which we can re-transmit) and have heard nothing
4099 // in response to it yet, so don't touch it.
4102 InboundHTLCState::Committed => true,
4103 InboundHTLCState::LocalRemoved(_) => {
4104 // We (hopefully) sent a commitment_signed updating this HTLC (which we can
4105 // re-transmit if needed) and they may have even sent a revoke_and_ack back
4106 // (that we missed). Keep this around for now and if they tell us they missed
4107 // the commitment_signed we can re-transmit the update then.
// Roll the inbound HTLC id counter back over the dropped, never-committed HTLCs.
4112 self.context.next_counterparty_htlc_id -= inbound_drop_count;
// A fee update the peer announced but never committed is likewise forgotten.
4114 if let Some((_, update_state)) = self.context.pending_update_fee {
4115 if update_state == FeeUpdateState::RemoteAnnounced {
4116 debug_assert!(!self.context.is_outbound());
4117 self.context.pending_update_fee = None;
4121 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
4122 if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
4123 // They sent us an update to remove this but haven't yet sent the corresponding
4124 // commitment_signed, we need to move it back to Committed and they can re-send
4125 // the update upon reconnection.
4126 htlc.state = OutboundHTLCState::Committed;
4130 self.context.sent_message_awaiting_response = None;
4132 self.context.channel_state.set_peer_disconnected();
4133 log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id());
// NOTE(review): elided dump — source line 4144 and the lines closing this function (after
// 4159) are missing from this excerpt.
4137 /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
4138 /// This must be called before we return the [`ChannelMonitorUpdate`] back to the
4139 /// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
4140 /// update completes (potentially immediately).
4141 /// The messages which were generated with the monitor update must *not* have been sent to the
4142 /// remote end, and must instead have been dropped. They will be regenerated when
4143 /// [`Self::monitor_updating_restored`] is called.
4145 /// [`ChannelManager`]: super::channelmanager::ChannelManager
4146 /// [`chain::Watch`]: crate::chain::Watch
4147 /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
4148 fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
4149 resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
4150 mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
4151 mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
// Accumulate (|=/append) rather than overwrite: multiple updates may pause the monitor
// before a single restore, and none of the queued work may be lost.
4153 self.context.monitor_pending_revoke_and_ack |= resend_raa;
4154 self.context.monitor_pending_commitment_signed |= resend_commitment;
4155 self.context.monitor_pending_channel_ready |= resend_channel_ready;
4156 self.context.monitor_pending_forwards.append(&mut pending_forwards);
4157 self.context.monitor_pending_failures.append(&mut pending_fails);
4158 self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
4159 self.context.channel_state.set_monitor_update_in_progress();
// NOTE(review): elided dump — several interior source lines (e.g. 4169-4170, 4183, 4185,
// 4190-4191, 4207-4209, 4225-4227, 4230, 4233, 4236-4237) are missing from this excerpt.
4162 /// Indicates that the latest ChannelMonitor update has been committed by the client
4163 /// successfully and we should restore normal operation. Returns messages which should be sent
4164 /// to the remote side.
4165 pub fn monitor_updating_restored<L: Deref, NS: Deref>(
4166 &mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash,
4167 user_config: &UserConfig, best_block_height: u32
4168 ) -> MonitorRestoreUpdates
4171 NS::Target: NodeSigner
// Restore must only ever run while an update is actually in flight.
4173 assert!(self.context.channel_state.is_monitor_update_in_progress());
4174 self.context.channel_state.clear_monitor_update_in_progress();
4176 // If we're past (or at) the AwaitingChannelReady stage on an outbound channel, try to
4177 // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
4178 // first received the funding_signed.
4179 let mut funding_broadcastable =
4180 if self.context.is_outbound() &&
4181 matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH)) ||
4182 matches!(self.context.channel_state, ChannelState::ChannelReady(_))
4184 self.context.funding_transaction.take()
4186 // That said, if the funding transaction is already confirmed (ie we're active with a
4187 // minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
4188 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && self.context.minimum_depth != Some(0) {
4189 funding_broadcastable = None;
4192 // We will never broadcast the funding transaction when we're in MonitorUpdateInProgress
4193 // (and we assume the user never directly broadcasts the funding transaction and waits for
4194 // us to do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
4195 // * an inbound channel that failed to persist the monitor on funding_created and we got
4196 // the funding transaction confirmed before the monitor was persisted, or
4197 // * a 0-conf channel and intended to send the channel_ready before any broadcast at all.
4198 let channel_ready = if self.context.monitor_pending_channel_ready {
4199 assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
4200 "Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
4201 self.context.monitor_pending_channel_ready = false;
4202 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4203 Some(msgs::ChannelReady {
4204 channel_id: self.context.channel_id(),
4205 next_per_commitment_point,
4206 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4210 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger);
// Drain the queued HTLC resolutions into locals via mem::swap so they are returned to the
// caller exactly once.
4212 let mut accepted_htlcs = Vec::new();
4213 mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards);
4214 let mut failed_htlcs = Vec::new();
4215 mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
4216 let mut finalized_claimed_htlcs = Vec::new();
4217 mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);
// If the peer is disconnected we must not regenerate RAA/commitment messages now; the
// channel_reestablish dance will decide what to retransmit instead.
4219 if self.context.channel_state.is_peer_disconnected() {
4220 self.context.monitor_pending_revoke_and_ack = false;
4221 self.context.monitor_pending_commitment_signed = false;
4222 return MonitorRestoreUpdates {
4223 raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
4224 accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
4228 let raa = if self.context.monitor_pending_revoke_and_ack {
4229 Some(self.get_last_revoke_and_ack())
4231 let commitment_update = if self.context.monitor_pending_commitment_signed {
4232 self.get_last_commitment_update_for_send(logger).ok()
4234 if commitment_update.is_some() {
4235 self.mark_awaiting_response();
4238 self.context.monitor_pending_revoke_and_ack = false;
4239 self.context.monitor_pending_commitment_signed = false;
4240 let order = self.context.resend_order.clone();
4241 log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
4242 &self.context.channel_id(), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
4243 if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
4244 match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
4245 MonitorRestoreUpdates {
4246 raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
/// Handles an inbound `update_fee` message from the counterparty, which may only come from
/// the channel funder. Validates the proposed feerate and checks it does not push our
/// dust-in-flight exposure past the configured limit.
// NOTE(review): elided dump — interior lines (4252, 4255, 4258, 4260, 4273, 4277-4281) are
// missing, including some closing braces.
4250 pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
4251 where F::Target: FeeEstimator, L::Target: Logger
// Only the non-funder receives update_fee; if we funded the channel this is a protocol error.
4253 if self.context.is_outbound() {
4254 return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
4256 if self.context.channel_state.is_peer_disconnected() {
4257 return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
4259 Channel::<SP>::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
4261 self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
4262 self.context.update_time_counter += 1;
4263 // Check that we won't be pushed over our dust exposure limit by the feerate increase.
4264 if !self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
4265 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
4266 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
4267 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
4268 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
4269 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
4270 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
4271 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
4272 msg.feerate_per_kw, holder_tx_dust_exposure)));
4274 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
4275 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
4276 msg.feerate_per_kw, counterparty_tx_dust_exposure)));
// NOTE(review): elided dump — lines 4283, 4288, 4291, 4294-4295, 4300, and the struct-literal
// fields after 4301 are missing, so the returned SignerResumeUpdates body is not visible here.
4282 /// Indicates that the signer may have some signatures for us, so we should retry if we're
4284 #[cfg(async_signing)]
4285 pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger {
// Retry each message that was previously blocked on an async signer, in dependency order:
// commitment_update, then funding_signed, then (only once funding_signed exists) channel_ready.
4286 let commitment_update = if self.context.signer_pending_commitment_update {
4287 self.get_last_commitment_update_for_send(logger).ok()
4289 let funding_signed = if self.context.signer_pending_funding && !self.context.is_outbound() {
4290 self.context.get_funding_signed_msg(logger).1
4292 let channel_ready = if funding_signed.is_some() {
4293 self.check_get_channel_ready(0)
4296 log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed and {} channel_ready",
4297 if commitment_update.is_some() { "a" } else { "no" },
4298 if funding_signed.is_some() { "a" } else { "no" },
4299 if channel_ready.is_some() { "a" } else { "no" });
4301 SignerResumeUpdates {
/// Builds the `revoke_and_ack` for our most recently revoked commitment, for retransmission.
/// Releases the per-commitment secret two numbers back and advertises the next
/// per-commitment point (commitment numbers count *down* from INITIAL_COMMITMENT_NUMBER).
4308 fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
4309 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4310 let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
4311 msgs::RevokeAndACK {
4312 channel_id: self.context.channel_id,
4313 per_commitment_secret,
4314 next_per_commitment_point,
// Taproot-only field; always None for ECDSA channels here.
4316 next_local_nonce: None,
// NOTE(review): elided dump — interior lines (e.g. 4326, 4338-4341, 4344, 4350-4351,
// 4358-4359, 4365-4370, 4375-4377, 4385-4387, 4395-4398, 4401-4404) are missing, including
// several match/brace closers.
4320 /// Gets the last commitment update for immediate sending to our peer.
4321 fn get_last_commitment_update_for_send<L: Deref>(&mut self, logger: &L) -> Result<msgs::CommitmentUpdate, ()> where L::Target: Logger {
// Regenerate every update_* message still pending on our latest commitment, in four
// buckets matching the wire messages.
4322 let mut update_add_htlcs = Vec::new();
4323 let mut update_fulfill_htlcs = Vec::new();
4324 let mut update_fail_htlcs = Vec::new();
4325 let mut update_fail_malformed_htlcs = Vec::new();
// Outbound HTLCs we announced but the peer has not yet committed get re-added.
4327 for htlc in self.context.pending_outbound_htlcs.iter() {
4328 if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
4329 update_add_htlcs.push(msgs::UpdateAddHTLC {
4330 channel_id: self.context.channel_id(),
4331 htlc_id: htlc.htlc_id,
4332 amount_msat: htlc.amount_msat,
4333 payment_hash: htlc.payment_hash,
4334 cltv_expiry: htlc.cltv_expiry,
4335 onion_routing_packet: (**onion_packet).clone(),
4336 skimmed_fee_msat: htlc.skimmed_fee_msat,
4337 blinding_point: htlc.blinding_point,
// Inbound HTLCs we removed locally get their removal message re-sent, keyed on the
// removal reason (fail, malformed-fail, or fulfill).
4342 for htlc in self.context.pending_inbound_htlcs.iter() {
4343 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
4345 &InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
4346 update_fail_htlcs.push(msgs::UpdateFailHTLC {
4347 channel_id: self.context.channel_id(),
4348 htlc_id: htlc.htlc_id,
4349 reason: err_packet.clone()
4352 &InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
4353 update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
4354 channel_id: self.context.channel_id(),
4355 htlc_id: htlc.htlc_id,
4356 sha256_of_onion: sha256_of_onion.clone(),
4357 failure_code: failure_code.clone(),
4360 &InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
4361 update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
4362 channel_id: self.context.channel_id(),
4363 htlc_id: htlc.htlc_id,
4364 payment_preimage: payment_preimage.clone(),
// Only the funder (outbound side) ever sends update_fee.
4371 let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() {
4372 Some(msgs::UpdateFee {
4373 channel_id: self.context.channel_id(),
4374 feerate_per_kw: self.context.pending_update_fee.unwrap().0,
4378 log_trace!(logger, "Regenerating latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
4379 &self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" },
4380 update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
// With async signing the signature may not be ready yet: track that via
// signer_pending_commitment_update; without async signing an unavailable signature is a bug.
4381 let commitment_signed = if let Ok(update) = self.send_commitment_no_state_update(logger).map(|(cu, _)| cu) {
4382 if self.context.signer_pending_commitment_update {
4383 log_trace!(logger, "Commitment update generated: clearing signer_pending_commitment_update");
4384 self.context.signer_pending_commitment_update = false;
4388 #[cfg(not(async_signing))] {
4389 panic!("Failed to get signature for new commitment state");
4391 #[cfg(async_signing)] {
4392 if !self.context.signer_pending_commitment_update {
4393 log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
4394 self.context.signer_pending_commitment_update = true;
4399 Ok(msgs::CommitmentUpdate {
4400 update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
4405 /// Gets the `Shutdown` message we should send our peer on reconnect, if any.
4406 pub fn get_outbound_shutdown(&self) -> Option<msgs::Shutdown> {
// Only retransmit shutdown if we already sent one; by then a shutdown script must exist.
4407 if self.context.channel_state.is_local_shutdown_sent() {
4408 assert!(self.context.shutdown_scriptpubkey.is_some());
4409 Some(msgs::Shutdown {
4410 channel_id: self.context.channel_id,
4411 scriptpubkey: self.get_closing_scriptpubkey(),
// NOTE(review): elided dump — many interior lines (e.g. 4418, 4427-4430, 4436-4437,
// 4441-4442, 4450, 4456-4457, 4466-4468, 4476-4478, 4483, 4485, 4487, 4494, 4501-4503,
// 4511, 4515-4517, 4521, 4525-4526, 4528-4529, 4535-4537, 4545, 4547, 4555-4557, 4561,
// 4563-4564, 4570, 4574, 4576-4577, 4584-4585, 4591-4592, 4598-4599, 4604-4607) are
// missing, including several closing braces and intermediate statements.
4416 /// May panic if some calls other than message-handling calls (which will all Err immediately)
4417 /// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
4419 /// Some links printed in log lines are included here to check them during build (when run with
4420 /// `cargo doc --document-private-items`):
4421 /// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
4422 /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
4423 pub fn channel_reestablish<L: Deref, NS: Deref>(
4424 &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
4425 chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock
4426 ) -> Result<ReestablishResponses, ChannelError>
4429 NS::Target: NodeSigner
// channel_reestablish is only valid immediately after a reconnection.
4431 if !self.context.channel_state.is_peer_disconnected() {
4432 // While BOLT 2 doesn't indicate explicitly we should error this channel here, it
4433 // almost certainly indicates we are going to end up out-of-sync in some way, so we
4434 // just close here instead of trying to recover.
4435 return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
// Sanity-bound the peer's claimed commitment numbers before any further processing.
4438 if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
4439 msg.next_local_commitment_number == 0 {
4440 return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned()));
// Commitment numbers count down from INITIAL_COMMITMENT_NUMBER; convert ours to the
// ascending form used by the reestablish message for comparison.
4443 let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
// Verify the peer's claimed last-revoked secret actually matches the point we gave them.
4444 if msg.next_remote_commitment_number > 0 {
4445 let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
4446 let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
4447 .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
4448 if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
4449 return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
// If the peer has a valid secret for a commitment *ahead* of ours, we have lost state:
// broadcasting our commitment would be revoked and claimable, so panic loudly instead.
4451 if msg.next_remote_commitment_number > our_commitment_transaction {
4452 macro_rules! log_and_panic {
4453 ($err_msg: expr) => {
4454 log_error!(logger, $err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4455 panic!($err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4458 log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
4459 This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
4460 More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
4461 If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
4462 ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
4463 ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
4464 Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
4465 See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
4469 // Before we change the state of the channel, we check if the peer is sending a very old
4470 // commitment transaction number, if yes we send a warning message.
4471 if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
4472 return Err(ChannelError::Warn(format!(
4473 "Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)",
4474 msg.next_remote_commitment_number,
4475 our_commitment_transaction
4479 // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
4480 // remaining cases either succeed or ErrorMessage-fail).
4481 self.context.channel_state.clear_peer_disconnected();
4482 self.context.sent_message_awaiting_response = None;
4484 let shutdown_msg = self.get_outbound_shutdown();
4486 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger);
// Pre-channel_ready handling: either nothing to resend, or re-send our channel_ready.
4488 if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(_)) {
4489 // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
4490 if !self.context.channel_state.is_our_channel_ready() ||
4491 self.context.channel_state.is_monitor_update_in_progress() {
4492 if msg.next_remote_commitment_number != 0 {
4493 return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
4495 // Short circuit the whole handler as there is nothing we can resend them
4496 return Ok(ReestablishResponses {
4497 channel_ready: None,
4498 raa: None, commitment_update: None,
4499 order: RAACommitmentOrder::CommitmentFirst,
4500 shutdown_msg, announcement_sigs,
4504 // We have OurChannelReady set!
4505 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4506 return Ok(ReestablishResponses {
4507 channel_ready: Some(msgs::ChannelReady {
4508 channel_id: self.context.channel_id(),
4509 next_per_commitment_point,
4510 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4512 raa: None, commitment_update: None,
4513 order: RAACommitmentOrder::CommitmentFirst,
4514 shutdown_msg, announcement_sigs,
// Decide whether we owe the peer a retransmitted revoke_and_ack (they are exactly one
// commitment behind) or nothing (they are up to date).
4518 let required_revoke = if msg.next_remote_commitment_number == our_commitment_transaction {
4519 // Remote isn't waiting on any RevokeAndACK from us!
4520 // Note that if we need to repeat our ChannelReady we'll do that in the next if block.
4522 } else if msg.next_remote_commitment_number + 1 == our_commitment_transaction {
4523 if self.context.channel_state.is_monitor_update_in_progress() {
4524 self.context.monitor_pending_revoke_and_ack = true;
4527 Some(self.get_last_revoke_and_ack())
4530 debug_assert!(false, "All values should have been handled in the four cases above");
4531 return Err(ChannelError::Close(format!(
4532 "Peer attempted to reestablish channel expecting a future local commitment transaction: {} (received) vs {} (expected)",
4533 msg.next_remote_commitment_number,
4534 our_commitment_transaction
4538 // We increment cur_counterparty_commitment_transaction_number only upon receipt of
4539 // revoke_and_ack, not on sending commitment_signed, so we add one if have
4540 // AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
4541 // the corresponding revoke_and_ack back yet.
4542 let is_awaiting_remote_revoke = self.context.channel_state.is_awaiting_remote_revoke();
4543 if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
4544 self.mark_awaiting_response();
4546 let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };
// Re-send channel_ready iff neither side has advanced past the first commitment.
4548 let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 {
4549 // We should never have to worry about MonitorUpdateInProgress resending ChannelReady
4550 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4551 Some(msgs::ChannelReady {
4552 channel_id: self.context.channel_id(),
4553 next_per_commitment_point,
4554 short_channel_id_alias: Some(self.context.outbound_scid_alias),
// Finally, decide whether the peer also lost our last commitment_signed (off by one) or
// is fully caught up; anything else is a fatal desync.
4558 if msg.next_local_commitment_number == next_counterparty_commitment_number {
4559 if required_revoke.is_some() {
4560 log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id());
4562 log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id());
4565 Ok(ReestablishResponses {
4566 channel_ready, shutdown_msg, announcement_sigs,
4567 raa: required_revoke,
4568 commitment_update: None,
4569 order: self.context.resend_order.clone(),
4571 } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
4572 if required_revoke.is_some() {
4573 log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id());
4575 log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id());
4578 if self.context.channel_state.is_monitor_update_in_progress() {
4579 self.context.monitor_pending_commitment_signed = true;
4580 Ok(ReestablishResponses {
4581 channel_ready, shutdown_msg, announcement_sigs,
4582 commitment_update: None, raa: None,
4583 order: self.context.resend_order.clone(),
4586 Ok(ReestablishResponses {
4587 channel_ready, shutdown_msg, announcement_sigs,
4588 raa: required_revoke,
4589 commitment_update: self.get_last_commitment_update_for_send(logger).ok(),
4590 order: self.context.resend_order.clone(),
4593 } else if msg.next_local_commitment_number < next_counterparty_commitment_number {
4594 Err(ChannelError::Close(format!(
4595 "Peer attempted to reestablish channel with a very old remote commitment transaction: {} (received) vs {} (expected)",
4596 msg.next_local_commitment_number,
4597 next_counterparty_commitment_number,
4600 Err(ChannelError::Close(format!(
4601 "Peer attempted to reestablish channel with a future remote commitment transaction: {} (received) vs {} (expected)",
4602 msg.next_local_commitment_number,
4603 next_counterparty_commitment_number,
// NOTE(review): elided dump — lines 4612, 4614, 4616, 4625, 4631, 4636-4637, 4653, 4655-4656,
// 4659-4660 are missing, including the function's return-type line and closing brace.
4608 /// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole
4609 /// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart,
4610 /// at which point they will be recalculated.
4611 fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>)
4613 where F::Target: FeeEstimator
// Memoized: once computed, the limits stay fixed until cleared on disconnect/restart.
4615 if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); }
4617 // Propose a range from our current Background feerate to our Normal feerate plus our
4618 // force_close_avoidance_max_fee_satoshis.
4619 // If we fail to come to consensus, we'll have to force-close.
4620 let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::ChannelCloseMinimum);
4621 // Use NonAnchorChannelFee because this should be an estimate for a channel close
4622 // that we don't expect to need fee bumping
4623 let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
// As the non-funder we don't pay the closing fee, so we accept any max the funder offers.
4624 let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() };
4626 // The spec requires that (when the channel does not have anchors) we only send absolute
4627 // channel fees no greater than the absolute channel fee on the current commitment
4628 // transaction. It's unclear *which* commitment transaction this refers to, and there isn't
4629 // very good reason to apply such a limit in any case. We don't bother doing so, risking
4630 // some force-closure by old nodes, but we wanted to close the channel anyway.
4632 if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw {
4633 let min_feerate = if self.context.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) };
4634 proposed_feerate = cmp::max(proposed_feerate, min_feerate);
4635 proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate);
4638 // Note that technically we could end up with a lower minimum fee if one sides' balance is
4639 // below our dust limit, causing the output to disappear. We don't bother handling this
4640 // case, however, as this should only happen if a channel is closed before any (material)
4641 // payments have been made on it. This may cause slight fee overpayment and/or failure to
4642 // come to consensus with our counterparty on appropriate fees, however it should be a
4643 // relatively rare case. We can revisit this later, though note that in order to determine
4644 // if the funders' output is dust we have to know the absolute fee we're going to use.
4645 let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
4646 let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
4647 let proposed_max_total_fee_satoshis = if self.context.is_outbound() {
4648 // We always add force_close_avoidance_max_fee_satoshis to our normal
4649 // feerate-calculated fee, but allow the max to be overridden if we're using a
4650 // target feerate-calculated fee.
4651 cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.context.config.options.force_close_avoidance_max_fee_satoshis,
4652 proposed_max_feerate as u64 * tx_weight / 1000)
// Non-funder: the fee can be at most the channel value minus our own balance
// (rounded up to whole sats).
4654 self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000
4657 self.context.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis));
4658 self.context.closing_fee_limits.clone().unwrap()
4661 /// Returns true if we're ready to commence the closing_signed negotiation phase. This is true
4662 /// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At
4663 /// this point if we're the funder we should send the initial closing_signed, and in any case
4664 /// shutdown should complete within a reasonable timeframe.
// Thin delegate: the actual readiness check lives on ChannelContext.
4665 fn closing_negotiation_ready(&self) -> bool {
4666 self.context.closing_negotiation_ready()
// NOTE(review): elided dump — lines 4676 and 4678-4682 (including closing braces and the
// Ok(()) return, presumably) are missing from this excerpt.
4669 /// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
4670 /// an Err if no progress is being made and the channel should be force-closed instead.
4671 /// Should be called on a one-minute timer.
4672 pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
4673 if self.closing_negotiation_ready() {
// closing_signed_in_flight still set from the previous tick means no progress was made
// in a full timer interval: give up and force-close.
4674 if self.context.closing_signed_in_flight {
4675 return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
4677 self.context.closing_signed_in_flight = true;
/// Proposes the initial `closing_signed` if we are the funder and negotiation is ready,
/// or replays a queued counterparty `closing_signed` if we are not. Returns (message to
/// send, fully-signed closing tx if complete, shutdown result) — all None when there is
/// nothing to do yet.
// NOTE(review): elided dump — lines 4687, 4694-4695, 4699, 4701-4702, 4707-4708, 4710,
// 4715, 4718, 4721, 4726, 4730-4732, 4734-4738 are missing, including the non-ECDSA
// signer arm and closing braces.
4683 pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
4684 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
4685 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
4686 where F::Target: FeeEstimator, L::Target: Logger
4688 // If we're waiting on a monitor persistence, that implies we're also waiting to send some
4689 // message to our counterparty (probably a `revoke_and_ack`). In such a case, we shouldn't
4690 // initiate `closing_signed` negotiation until we're clear of all pending messages. Note
4691 // that closing_negotiation_ready checks this case (as well as a few others).
4692 if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
4693 return Ok((None, None, None));
// Non-funder: we never initiate; at most we respond to a queued counterparty proposal.
4696 if !self.context.is_outbound() {
4697 if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() {
4698 return self.closing_signed(fee_estimator, &msg);
4700 return Ok((None, None, None));
4703 // If we're waiting on a counterparty `commitment_signed` to clear some updates from our
4704 // local commitment transaction, we can't yet initiate `closing_signed` negotiation.
4705 if self.context.expecting_peer_commitment_signed {
4706 return Ok((None, None, None));
4709 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
4711 assert!(self.context.shutdown_scriptpubkey.is_some());
// Open the negotiation at our minimum: the counterparty can only push the fee upward.
4712 let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false);
4713 log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)",
4714 our_min_fee, our_max_fee, total_fee_satoshis);
4716 match &self.context.holder_signer {
4717 ChannelSignerType::Ecdsa(ecdsa) => {
4719 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
4720 .map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;
4722 self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
4723 Ok((Some(msgs::ClosingSigned {
4724 channel_id: self.context.channel_id,
4725 fee_satoshis: total_fee_satoshis,
4727 fee_range: Some(msgs::ClosingSignedFeeRange {
4728 min_fee_satoshis: our_min_fee,
4729 max_fee_satoshis: our_max_fee,
4733 // TODO (taproot|arik)
4739 // Marks a channel as waiting for a response from the counterparty. If it's not received
4740 // [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] after sending our own to them, then we'll attempt
4742 fn mark_awaiting_response(&mut self) {
// `Some(0)` starts the tick counter that `should_disconnect_peer_awaiting_response`
// increments on each timer tick.
4743 self.context.sent_message_awaiting_response = Some(0);
4746 /// Determines whether we should disconnect the counterparty due to not receiving a response
4747 /// within our expected timeframe.
4749 /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
4750 pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool {
4751 let ticks_elapsed = if let Some(ticks_elapsed) = self.context.sent_message_awaiting_response.as_mut() {
4754 // Don't disconnect when we're not waiting on a response.
// One tick per call; disconnect once the threshold is reached.
4757 *ticks_elapsed += 1;
4758 *ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
/// Handles a `shutdown` message from the counterparty: validates its scriptpubkey, records
/// it, transitions our state to remote-shutdown-sent, and (if we haven't yet sent our own
/// `shutdown`) prepares ours. Also frees holding-cell HTLCs, returning them so the caller
/// can fail the corresponding payments.
/// Returns `(our shutdown msg if any, monitor update if our shutdown script changed,
/// dropped holding-cell outbound HTLCs)`.
4762 &mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
4763 ) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
4765 if self.context.channel_state.is_peer_disconnected() {
4766 return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
4768 if self.context.channel_state.is_pre_funded_state() {
4769 // Spec says we should fail the connection, not the channel, but that's nonsense, there
4770 // are plenty of reasons you may want to fail a channel pre-funding, and spec says you
4771 // can do that via error message without getting a connection fail anyway...
4772 return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
// The counterparty may not initiate shutdown while it has HTLC additions we haven't
// yet committed to.
4774 for htlc in self.context.pending_inbound_htlcs.iter() {
4775 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
4776 return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
4779 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
4781 if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
4782 return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_hex_string())));
// A re-sent shutdown must carry the same scriptpubkey as before.
4785 if self.context.counterparty_shutdown_scriptpubkey.is_some() {
4786 if Some(&msg.scriptpubkey) != self.context.counterparty_shutdown_scriptpubkey.as_ref() {
4787 return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_hex_string())));
4790 self.context.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
4793 // If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
4794 // immediately after the commitment dance, but we can send a Shutdown because we won't send
4795 // any further commitment updates after we set LocalShutdownSent.
4796 let send_shutdown = !self.context.channel_state.is_local_shutdown_sent();
// Lazily fetch our shutdown script from the signer provider if we don't have one yet;
// failure here (or an incompatible script) is fatal for the channel.
4798 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
4801 assert!(send_shutdown);
4802 let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
4803 Ok(scriptpubkey) => scriptpubkey,
4804 Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
4806 if !shutdown_scriptpubkey.is_compatible(their_features) {
4807 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
4809 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
4814 // From here on out, we may not fail!
4816 self.context.channel_state.set_remote_shutdown_sent();
4817 self.context.update_time_counter += 1;
// Persist our closing script in the monitor so it survives restarts.
4819 let monitor_update = if update_shutdown_script {
4820 self.context.latest_monitor_update_id += 1;
4821 let monitor_update = ChannelMonitorUpdate {
4822 update_id: self.context.latest_monitor_update_id,
4823 counterparty_node_id: Some(self.context.counterparty_node_id),
4824 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
4825 scriptpubkey: self.get_closing_scriptpubkey(),
4828 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
4829 self.push_ret_blockable_mon_update(monitor_update)
4831 let shutdown = if send_shutdown {
4832 Some(msgs::Shutdown {
4833 channel_id: self.context.channel_id,
4834 scriptpubkey: self.get_closing_scriptpubkey(),
4838 // We can't send our shutdown until we've committed all of our pending HTLCs, but the
4839 // remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
4840 // cell HTLCs and return them to fail the payment.
4841 self.context.holding_cell_update_fee = None;
4842 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
4843 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
4845 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
4846 dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
4853 self.context.channel_state.set_local_shutdown_sent();
4854 self.context.update_time_counter += 1;
4856 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
/// Assembles the fully-signed cooperative closing transaction by filling in the witness of
/// the (single) funding input: empty dummy element, the two DER signatures (each with
/// SIGHASH_ALL appended), and the 2-of-2 funding redeemscript.
4859 fn build_signed_closing_transaction(&self, closing_tx: &ClosingTransaction, counterparty_sig: &Signature, sig: &Signature) -> Transaction {
4860 let mut tx = closing_tx.trust().built_transaction().clone();
// OP_CHECKMULTISIG pops one extra stack element, hence the leading empty push.
4862 tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
4864 let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize();
4865 let counterparty_funding_key = self.context.counterparty_funding_pubkey().serialize();
4866 let mut holder_sig = sig.serialize_der().to_vec();
4867 holder_sig.push(EcdsaSighashType::All as u8);
4868 let mut cp_sig = counterparty_sig.serialize_der().to_vec();
4869 cp_sig.push(EcdsaSighashType::All as u8);
// Signature order in the witness must match the pubkey order in the redeemscript, which
// is determined by lexicographic comparison of the serialized funding pubkeys.
4870 if funding_key[..] < counterparty_funding_key[..] {
4871 tx.input[0].witness.push(holder_sig);
4872 tx.input[0].witness.push(cp_sig);
4874 tx.input[0].witness.push(cp_sig);
4875 tx.input[0].witness.push(holder_sig);
// Last witness element: the funding redeemscript itself (P2WSH spend).
4878 tx.input[0].witness.push(self.context.get_funding_redeemscript().into_bytes());
/// Handles a counterparty `closing_signed`: validates their proposed fee and signature,
/// and either (a) completes the close if the fee matches our last proposal, returning the
/// fully-signed closing transaction, or (b) counter-proposes a fee via the `propose_fee!`
/// macro, possibly also completing the close when we accept their fee exactly.
4882 pub fn closing_signed<F: Deref>(
4883 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned)
4884 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
4885 where F::Target: FeeEstimator
4887 if !self.context.channel_state.is_both_sides_shutdown() {
4888 return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
4890 if self.context.channel_state.is_peer_disconnected() {
4891 return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
4893 if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
4894 return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
4896 if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
4897 return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
4900 if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() {
4901 return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
// While a monitor update is in flight we can't make progress; stash the message and let
// `maybe_propose_closing_signed` replay it once the update completes.
4904 if self.context.channel_state.is_monitor_update_in_progress() {
4905 self.context.pending_counterparty_closing_signed = Some(msg.clone());
4906 return Ok((None, None, None));
4909 let funding_redeemscript = self.context.get_funding_redeemscript();
4910 let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
4911 if used_total_fee != msg.fee_satoshis {
4912 return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
4914 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
4916 match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
4919 // The remote end may have decided to revoke their output due to inconsistent dust
4920 // limits, so check for that case by re-checking the signature here.
4921 closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0;
4922 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
4923 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.context.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
// Refuse any non-segwit output below the standardness dust limit: such a closing tx
// would not relay.
4927 for outp in closing_tx.trust().built_transaction().output.iter() {
4928 if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
4929 return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
4933 assert!(self.context.shutdown_scriptpubkey.is_some());
// If their fee matches our last proposal exactly, negotiation is done: combine the two
// signatures and complete the shutdown.
4934 if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
4935 if last_fee == msg.fee_satoshis {
4936 let shutdown_result = ShutdownResult {
4937 closure_reason: ClosureReason::CooperativeClosure,
4938 monitor_update: None,
4939 dropped_outbound_htlcs: Vec::new(),
4940 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
4941 channel_id: self.context.channel_id,
4942 user_channel_id: self.context.user_id,
4943 channel_capacity_satoshis: self.context.channel_value_satoshis,
4944 counterparty_node_id: self.context.counterparty_node_id,
4945 unbroadcasted_funding_tx: self.context.unbroadcasted_funding(),
4947 let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
4948 self.context.channel_state = ChannelState::ShutdownComplete;
4949 self.context.update_time_counter += 1;
4950 return Ok((None, Some(tx), Some(shutdown_result)));
4954 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
// Counter-proposes `$new_fee` (signing the corresponding closing tx) and returns from
// this function; if `$new_fee` equals their proposal, also completes the shutdown.
4956 macro_rules! propose_fee {
4957 ($new_fee: expr) => {
4958 let (closing_tx, used_fee) = if $new_fee == msg.fee_satoshis {
4959 (closing_tx, $new_fee)
4961 self.build_closing_transaction($new_fee, false)
4964 return match &self.context.holder_signer {
4965 ChannelSignerType::Ecdsa(ecdsa) => {
4967 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
4968 .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
4969 let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
4970 let shutdown_result = ShutdownResult {
4971 closure_reason: ClosureReason::CooperativeClosure,
4972 monitor_update: None,
4973 dropped_outbound_htlcs: Vec::new(),
4974 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
4975 channel_id: self.context.channel_id,
4976 user_channel_id: self.context.user_id,
4977 channel_capacity_satoshis: self.context.channel_value_satoshis,
4978 counterparty_node_id: self.context.counterparty_node_id,
4979 unbroadcasted_funding_tx: self.context.unbroadcasted_funding(),
4981 self.context.channel_state = ChannelState::ShutdownComplete;
4982 self.context.update_time_counter += 1;
4983 let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
4984 (Some(tx), Some(shutdown_result))
4989 self.context.last_sent_closing_fee = Some((used_fee, sig.clone()));
4990 Ok((Some(msgs::ClosingSigned {
4991 channel_id: self.context.channel_id,
4992 fee_satoshis: used_fee,
4994 fee_range: Some(msgs::ClosingSignedFeeRange {
4995 min_fee_satoshis: our_min_fee,
4996 max_fee_satoshis: our_max_fee,
4998 }), signed_tx, shutdown_result))
5000 // TODO (taproot|arik)
// Modern (fee_range-based) negotiation: validate their range against ours, then either
// accept their fee or pick the best overlapping value.
5007 if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range {
5008 if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis {
5009 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
5011 if max_fee_satoshis < our_min_fee {
5012 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee)));
5014 if min_fee_satoshis > our_max_fee {
5015 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee)));
5018 if !self.context.is_outbound() {
5019 // They have to pay, so pick the highest fee in the overlapping range.
5020 // We should never set an upper bound aside from their full balance
5021 debug_assert_eq!(our_max_fee, self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000);
5022 propose_fee!(cmp::min(max_fee_satoshis, our_max_fee));
5024 if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee {
5025 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
5026 msg.fee_satoshis, our_min_fee, our_max_fee)));
5028 // The proposed fee is in our acceptable range, accept it and broadcast!
5029 propose_fee!(msg.fee_satoshis);
5032 // Old fee style negotiation. We don't bother to enforce whether they are complying
5033 // with the "making progress" requirements, we just comply and hope for the best.
5034 if let Some((last_fee, _)) = self.context.last_sent_closing_fee {
5035 if msg.fee_satoshis > last_fee {
5036 if msg.fee_satoshis < our_max_fee {
5037 propose_fee!(msg.fee_satoshis);
5038 } else if last_fee < our_max_fee {
5039 propose_fee!(our_max_fee);
5041 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
5044 if msg.fee_satoshis > our_min_fee {
5045 propose_fee!(msg.fee_satoshis);
5046 } else if last_fee > our_min_fee {
5047 propose_fee!(our_min_fee);
5049 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
// No prior proposal from us: clamp their fee into our [min, max] range.
5053 if msg.fee_satoshis < our_min_fee {
5054 propose_fee!(our_min_fee);
5055 } else if msg.fee_satoshis > our_max_fee {
5056 propose_fee!(our_max_fee);
5058 propose_fee!(msg.fee_satoshis);
/// Checks a to-be-forwarded HTLC against a specific [`ChannelConfig`]: the inbound amount
/// must cover the outbound amount plus our forwarding fee, and the inbound expiry must
/// leave at least `cltv_expiry_delta` blocks over the outgoing expiry.
/// On failure returns the failure-reason string and the BOLT 4 failure code.
5064 fn internal_htlc_satisfies_config(
5065 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig,
5066 ) -> Result<(), (&'static str, u16)> {
// fee = base + amt * proportional / 1_000_000; checked arithmetic so an overflow is
// treated as an insufficient fee rather than wrapping.
5067 let fee = amt_to_forward.checked_mul(config.forwarding_fee_proportional_millionths as u64)
5068 .and_then(|prop_fee| (prop_fee / 1000000).checked_add(config.forwarding_fee_base_msat as u64));
5069 if fee.is_none() || htlc.amount_msat < fee.unwrap() ||
5070 (htlc.amount_msat - fee.unwrap()) < amt_to_forward {
5072 "Prior hop has deviated from specified fees parameters or origin node has obsolete ones",
5073 0x1000 | 12, // fee_insufficient
// Widen to u64 to avoid overflow when adding the delta to the outgoing expiry.
5076 if (htlc.cltv_expiry as u64) < outgoing_cltv_value as u64 + config.cltv_expiry_delta as u64 {
5078 "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
5079 0x1000 | 13, // incorrect_cltv_expiry
5085 /// Determines whether the parameters of an incoming HTLC to be forwarded satisfy the channel's
5086 /// [`ChannelConfig`]. This first looks at the channel's current [`ChannelConfig`], and if
5087 /// unsuccessful, falls back to the previous one if one exists.
5088 pub fn htlc_satisfies_config(
5089 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32,
5090 ) -> Result<(), (&'static str, u16)> {
// Try the current config first; the previous config (if any) is accepted as a grace
// period for recently-changed forwarding parameters.
5091 self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.context.config())
5093 if let Some(prev_config) = self.context.prev_config() {
5094 self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &prev_config)
/// Returns one past the stored holder commitment transaction number.
/// NOTE(review): commitment numbers appear to count down from INITIAL_COMMITMENT_NUMBER
/// (see `is_awaiting_initial_mon_persist`), so "+ 1" denotes the previous commitment —
/// confirm against the field's definition.
5101 pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
5102 self.context.cur_holder_commitment_transaction_number + 1
/// Returns one past the stored counterparty commitment transaction number, minus one while
/// we are awaiting their `revoke_and_ack` (i.e. a newer commitment is already in flight).
5105 pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
5106 self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state.is_awaiting_remote_revoke() { 1 } else { 0 }
/// Returns two past the stored counterparty commitment transaction number — presumably the
/// most recently revoked counterparty commitment; confirm against callers.
5109 pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
5110 self.context.cur_counterparty_commitment_transaction_number + 2
/// Returns a reference to the channel's holder signer.
5114 pub fn get_signer(&self) -> &ChannelSignerType<SP> {
5115 &self.context.holder_signer
/// Builds a [`ChannelValueStat`] snapshot of the channel's balances; satoshi-denominated
/// fields are converted to millisatoshis (`* 1000`).
5119 pub fn get_value_stat(&self) -> ChannelValueStat {
5121 value_to_self_msat: self.context.value_to_self_msat,
5122 channel_value_msat: self.context.channel_value_satoshis * 1000,
// Panics if the counterparty has not yet selected a channel reserve.
5123 channel_reserve_msat: self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000,
5124 pending_outbound_htlcs_amount_msat: self.context.pending_outbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
5125 pending_inbound_htlcs_amount_msat: self.context.pending_inbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
// Sum of the AddHTLC amounts still sitting in the holding cell.
5126 holding_cell_outbound_amount_msat: {
5128 for h in self.context.holding_cell_htlc_updates.iter() {
5130 &HTLCUpdateAwaitingACK::AddHTLC{amount_msat, .. } => {
5138 counterparty_max_htlc_value_in_flight_msat: self.context.counterparty_max_htlc_value_in_flight_msat,
5139 counterparty_dust_limit_msat: self.context.counterparty_dust_limit_satoshis * 1000,
5143 /// Returns true if this channel has been marked as awaiting a monitor update to move forward.
5144 /// Allowed in any state (including after shutdown)
5145 pub fn is_awaiting_monitor_update(&self) -> bool {
// Thin accessor over the MONITOR_UPDATE_IN_PROGRESS state flag.
5146 self.context.channel_state.is_monitor_update_in_progress()
5149 /// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
5150 pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 {
// With no blocked updates, everything up to the latest ID has been released; otherwise
// everything strictly before the first blocked update's ID has.
5151 if self.context.blocked_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
5152 self.context.blocked_monitor_updates[0].update.update_id - 1
5155 /// Returns the next blocked monitor update, if one exists, and a bool which indicates a
5156 /// further blocked monitor update exists after the next.
5157 pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(ChannelMonitorUpdate, bool)> {
5158 if self.context.blocked_monitor_updates.is_empty() { return None; }
// Pop the oldest blocked update (FIFO order) and report whether more remain.
5159 Some((self.context.blocked_monitor_updates.remove(0).update,
5160 !self.context.blocked_monitor_updates.is_empty()))
5163 /// Pushes a new monitor update into our monitor update queue, returning it if it should be
5164 /// immediately given to the user for persisting or `None` if it should be held as blocked.
5165 fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
5166 -> Option<ChannelMonitorUpdate> {
// An update may only be released when no earlier updates are still blocked, preserving
// in-order delivery of monitor updates.
5167 let release_monitor = self.context.blocked_monitor_updates.is_empty();
5168 if !release_monitor {
5169 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
/// Returns the number of monitor updates currently held back as blocked.
5178 pub fn blocked_monitor_updates_pending(&self) -> usize {
5179 self.context.blocked_monitor_updates.len()
5182 /// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
5183 /// If the channel is outbound, this implies we have not yet broadcasted the funding
5184 /// transaction. If the channel is inbound, this implies simply that the channel has not
5186 pub fn is_awaiting_initial_mon_persist(&self) -> bool {
5187 if !self.is_awaiting_monitor_update() { return false; }
// Case 1: still in AwaitingChannelReady with only benign flags set — the pending monitor
// update must be the initial persistence.
5189 self.context.channel_state, ChannelState::AwaitingChannelReady(flags)
5190 if (flags & !(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY | FundedStateFlags::PEER_DISCONNECTED | FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS | AwaitingChannelReadyFlags::WAITING_FOR_BATCH)).is_empty()
5192 // If we're not a 0conf channel, we'll be waiting on a monitor update with only
5193 // AwaitingChannelReady set, though our peer could have sent their channel_ready.
5194 debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0)
// Case 2: 0-conf channels move past AwaitingChannelReady immediately, so instead detect
// that both commitment numbers have advanced exactly once (from funding_signed).
5197 if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
5198 self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
5199 // If we're a 0-conf channel, we'll move beyond AwaitingChannelReady immediately even while
5200 // waiting for the initial monitor persistence. Thus, we check if our commitment
5201 // transaction numbers have both been iterated only exactly once (for the
5202 // funding_signed), and we're awaiting monitor update.
5204 // If we got here, we shouldn't have yet broadcasted the funding transaction (as the
5205 // only way to get an awaiting-monitor-update state during initial funding is if the
5206 // initial monitor persistence is still pending).
5208 // Because deciding we're awaiting initial broadcast spuriously could result in
5209 // funds-loss (as we don't have a monitor, but have the funding transaction confirmed),
5210 // we hard-assert here, even in production builds.
5211 if self.context.is_outbound() { assert!(self.context.funding_transaction.is_some()); }
5212 assert!(self.context.monitor_pending_channel_ready);
5213 assert_eq!(self.context.latest_monitor_update_id, 0);
5219 /// Returns true if our channel_ready has been sent
5220 pub fn is_our_channel_ready(&self) -> bool {
// Either the OUR_CHANNEL_READY flag is set, or we've already reached ChannelReady
// (which implies both sides sent channel_ready).
5221 matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY)) ||
5222 matches!(self.context.channel_state, ChannelState::ChannelReady(_))
5225 /// Returns true if our peer has either initiated or agreed to shut down the channel.
5226 pub fn received_shutdown(&self) -> bool {
// i.e. we have processed a `shutdown` message from the counterparty.
5227 self.context.channel_state.is_remote_shutdown_sent()
5230 /// Returns true if we either initiated or agreed to shut down the channel.
5231 pub fn sent_shutdown(&self) -> bool {
// i.e. we have (or are about to have) sent our own `shutdown` message.
5232 self.context.channel_state.is_local_shutdown_sent()
5235 /// Returns true if this channel is fully shut down. True here implies that no further actions
5236 /// may/will be taken on this channel, and thus this object should be freed. Any future changes
5237 /// will be handled appropriately by the chain monitor.
5238 pub fn is_shutdown(&self) -> bool {
5239 matches!(self.context.channel_state, ChannelState::ShutdownComplete)
/// Returns the current [`ChannelUpdateStatus`] (public channel_update gossip state).
5242 pub fn channel_update_status(&self) -> ChannelUpdateStatus {
5243 self.context.channel_update_status
/// Sets the [`ChannelUpdateStatus`], bumping `update_time_counter` so any subsequently
/// generated `channel_update` carries a fresh timestamp.
5246 pub fn set_channel_update_status(&mut self, status: ChannelUpdateStatus) {
5247 self.context.update_time_counter += 1;
5248 self.context.channel_update_status = status;
/// Checks whether the funding transaction has reached our required confirmation depth at
/// `height` and, if so, advances the channel state and returns the `channel_ready` message
/// we should send (or `None` if not ready / already handled / peer disconnected).
5251 fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
5253 // * always when a new block/transactions are confirmed with the new height
5254 // * when funding is signed with a height of 0
// Not yet confirmed and not a 0-conf channel: nothing to do.
5255 if self.context.funding_tx_confirmation_height == 0 && self.context.minimum_depth != Some(0) {
5259 let funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
// A reorg removed our confirmation: forget the recorded confirmation height.
5260 if funding_tx_confirmations <= 0 {
5261 self.context.funding_tx_confirmation_height = 0;
5264 if funding_tx_confirmations < self.context.minimum_depth.unwrap_or(0) as i64 {
5268 // If we're still pending the signature on a funding transaction, then we're not ready to send a
5269 // channel_ready yet.
5270 if self.context.signer_pending_funding {
5274 // Note that we don't include ChannelState::WaitingForBatch as we don't want to send
5275 // channel_ready until the entire batch is ready.
// State machine: set OUR_CHANNEL_READY if neither side has sent channel_ready; move to
// ChannelReady if only THEIR side had; otherwise we've already sent ours.
5276 let need_commitment_update = if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if (f & !FundedStateFlags::ALL).is_empty()) {
5277 self.context.channel_state.set_our_channel_ready();
5279 } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f & !FundedStateFlags::ALL == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY) {
5280 self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
5281 self.context.update_time_counter += 1;
5283 } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f & !FundedStateFlags::ALL == AwaitingChannelReadyFlags::OUR_CHANNEL_READY) {
5284 // We got a reorg but not enough to trigger a force close, just ignore.
5287 if self.context.funding_tx_confirmation_height != 0 &&
5288 self.context.channel_state < ChannelState::ChannelReady(ChannelReadyFlags::new())
5290 // We should never see a funding transaction on-chain until we've received
5291 // funding_signed (if we're an outbound channel), or seen funding_generated (if we're
5292 // an inbound channel - before that we have no known funding TXID). The fuzzer,
5293 // however, may do this and we shouldn't treat it as a bug.
5294 #[cfg(not(fuzzing))]
5295 panic!("Started confirming a channel in a state pre-AwaitingChannelReady: {}.\n\
5296 Do NOT broadcast a funding transaction manually - let LDK do it for you!",
5297 self.context.channel_state.to_u32());
5299 // We got a reorg but not enough to trigger a force close, just ignore.
// Only emit channel_ready when no monitor update is in flight and the peer is connected;
// otherwise remember that we owe them a channel_ready.
5303 if need_commitment_update {
5304 if !self.context.channel_state.is_monitor_update_in_progress() {
5305 if !self.context.channel_state.is_peer_disconnected() {
5306 let next_per_commitment_point =
5307 self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
5308 return Some(msgs::ChannelReady {
5309 channel_id: self.context.channel_id,
5310 next_per_commitment_point,
5311 short_channel_id_alias: Some(self.context.outbound_scid_alias),
5315 self.context.monitor_pending_channel_ready = true;
5321 /// When a transaction is confirmed, we check whether it is or spends the funding transaction
5322 /// In the first case, we store the confirmation height and calculating the short channel id.
5323 /// In the second, we simply return an Err indicating we need to be force-closed now.
5324 pub fn transactions_confirmed<NS: Deref, L: Deref>(
5325 &mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData,
5326 chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L
5327 ) -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5329 NS::Target: NodeSigner,
5332 let mut msgs = (None, None);
5333 if let Some(funding_txo) = self.context.get_funding_txo() {
5334 for &(index_in_block, tx) in txdata.iter() {
5335 // Check if the transaction is the expected funding transaction, and if it is,
5336 // check that it pays the right amount to the right script.
5337 if self.context.funding_tx_confirmation_height == 0 {
5338 if tx.txid() == funding_txo.txid {
5339 let txo_idx = funding_txo.index as usize;
// Verify the funding output exists, pays the P2WSH of our funding redeemscript,
// and carries exactly the negotiated channel value.
5340 if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_v0_p2wsh() ||
5341 tx.output[txo_idx].value != self.context.channel_value_satoshis {
5342 if self.context.is_outbound() {
5343 // If we generated the funding transaction and it doesn't match what it
5344 // should, the client is really broken and we should just panic and
5345 // tell them off. That said, because hash collisions happen with high
5346 // probability in fuzzing mode, if we're fuzzing we just close the
5347 // channel and move on.
5348 #[cfg(not(fuzzing))]
5349 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
5351 self.context.update_time_counter += 1;
5352 let err_reason = "funding tx had wrong script/value or output index";
5353 return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() });
// Our own funding tx must be fully segwit (non-malleable txid), else our
// signed commitment transactions could be invalidated.
5355 if self.context.is_outbound() {
5356 if !tx.is_coin_base() {
5357 for input in tx.input.iter() {
5358 if input.witness.is_empty() {
5359 // We generated a malleable funding transaction, implying we've
5360 // just exposed ourselves to funds loss to our counterparty.
5361 #[cfg(not(fuzzing))]
5362 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
5367 self.context.funding_tx_confirmation_height = height;
5368 self.context.funding_tx_confirmed_in = Some(*block_hash);
5369 self.context.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
5370 Ok(scid) => Some(scid),
5371 Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
5374 // If this is a coinbase transaction and not a 0-conf channel
5375 // we should update our min_depth to 100 to handle coinbase maturity
5376 if tx.is_coin_base() &&
5377 self.context.minimum_depth.unwrap_or(0) > 0 &&
5378 self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
5379 self.context.minimum_depth = Some(COINBASE_MATURITY);
5382 // If we allow 1-conf funding, we may need to check for channel_ready here and
5383 // send it immediately instead of waiting for a best_block_updated call (which
5384 // may have already happened for this block).
5385 if let Some(channel_ready) = self.check_get_channel_ready(height) {
5386 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
5387 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger);
5388 msgs = (Some(channel_ready), announcement_sigs);
// Any confirmed transaction spending the funding outpoint closes the channel.
5391 for inp in tx.input.iter() {
5392 if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
5393 log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, &self.context.channel_id());
5394 return Err(ClosureReason::CommitmentTxConfirmed);
5402 /// When a new block is connected, we check the height of the block against outbound holding
5403 /// cell HTLCs in case we need to give up on them prematurely and time them out. Everything
5404 /// else (e.g. commitment transaction broadcasts, HTLC transaction broadcasting, etc) is
5405 /// handled by the ChannelMonitor.
5407 /// If we return Err, the channel may have been closed, at which point the standard
5408 /// requirements apply - no calls may be made except those explicitly stated to be allowed
5411 /// May return some HTLCs (and their payment_hash) which have timed out and should be failed
5413 pub fn best_block_updated<NS: Deref, L: Deref>(
5414 &mut self, height: u32, highest_header_time: u32, chain_hash: ChainHash,
5415 node_signer: &NS, user_config: &UserConfig, logger: &L
5416 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5418 NS::Target: NodeSigner,
// Thin wrapper: delegates to do_best_block_updated with the chain/signer/config context
// present (`Some(..)`), enabling announcement-signature generation.
5421 self.do_best_block_updated(height, highest_header_time, Some((chain_hash, node_signer, user_config)), logger)
// Core handler for a new best block. Times out holding-cell HTLCs that are about to expire,
// bumps our update-time counter, and either (a) returns a `channel_ready` to send once the
// funding tx has enough confirmations, or (b) force-closes the channel if the funding tx
// became unconfirmed after channel_ready, or if an inbound funding never confirmed in time.
// `chain_node_signer` is `None` when called from `funding_transaction_unconfirmed`, in which
// case no messages are generated.
5424 fn do_best_block_updated<NS: Deref, L: Deref>(
5425 &mut self, height: u32, highest_header_time: u32,
5426 chain_node_signer: Option<(ChainHash, &NS, &UserConfig)>, logger: &L
5427 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5429 NS::Target: NodeSigner,
5432 let mut timed_out_htlcs = Vec::new();
5433 // This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
5434 // forward an HTLC when our counterparty should almost certainly just fail it for expiring
// Any holding-cell AddHTLC whose cltv_expiry is within LATENCY_GRACE_PERIOD_BLOCKS of the
// current height is dropped from the cell and reported back to the caller for failure.
5436 let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
5437 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
5439 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
5440 if *cltv_expiry <= unforwarded_htlc_cltv_limit {
5441 timed_out_htlcs.push((source.clone(), payment_hash.clone()));
// Monotonically advance our channel_update timestamp counter to at least the best header time.
5449 self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);
5451 if let Some(channel_ready) = self.check_get_channel_ready(height) {
5452 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
5453 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
5455 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
5456 return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
5459 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
5460 self.context.channel_state.is_our_channel_ready() {
5461 let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
5462 if self.context.funding_tx_confirmation_height == 0 {
5463 // Note that check_get_channel_ready may reset funding_tx_confirmation_height to
5464 // zero if it has been reorged out, however in either case, our state flags
5465 // indicate we've already sent a channel_ready
5466 funding_tx_confirmations = 0;
5469 // If we've sent channel_ready (or have both sent and received channel_ready), and
5470 // the funding transaction has become unconfirmed,
5471 // close the channel and hope we can get the latest state on chain (because presumably
5472 // the funding transaction is at least still in the mempool of most nodes).
5474 // Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
5475 // 0-conf channel, but not doing so may lead to the
5476 // `ChannelManager::short_to_chan_info` map being inconsistent, so we currently have
5478 if funding_tx_confirmations == 0 && self.context.funding_tx_confirmed_in.is_some() {
5479 let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
5480 self.context.minimum_depth.unwrap(), funding_tx_confirmations);
5481 return Err(ClosureReason::ProcessingError { err: err_reason });
// Inbound (non-outbound) channels whose funding never confirmed within
// FUNDING_CONF_DEADLINE_BLOCKS of channel creation are abandoned.
5483 } else if !self.context.is_outbound() && self.context.funding_tx_confirmed_in.is_none() &&
5484 height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
5485 log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id);
5486 // If funding_tx_confirmed_in is unset, the channel must not be active
5487 assert!(self.context.channel_state <= ChannelState::ChannelReady(ChannelReadyFlags::new()));
5488 assert!(!self.context.channel_state.is_our_channel_ready());
5489 return Err(ClosureReason::FundingTimedOut);
5492 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
5493 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
5495 Ok((None, timed_out_htlcs, announcement_sigs))
5498 /// Indicates the funding transaction is no longer confirmed in the main chain. This may
5499 /// force-close the channel, but may also indicate a harmless reorganization of a block or two
5500 /// before the channel has reached channel_ready and we can just wait for more blocks.
5501 pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
5502 if self.context.funding_tx_confirmation_height != 0 {
5503 // We handle the funding disconnection by calling best_block_updated with a height one
5504 // below where our funding was connected, implying a reorg back to conf_height - 1.
5505 let reorg_height = self.context.funding_tx_confirmation_height - 1;
5506 // We use the time field to bump the current time we set on channel updates if its
5507 // larger. If we don't know that time has moved forward, we can just set it to the last
5508 // time we saw and it will be ignored.
5509 let best_time = self.context.update_time_counter;
// `None` for chain_node_signer: this path must never generate outbound messages, which
// the asserts below enforce.
5510 match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&dyn NodeSigner, &UserConfig)>, logger) {
5511 Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
5512 assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?");
5513 assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
5514 assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?");
5520 // We never learned about the funding confirmation anyway, just ignore
5525 // Methods to get unprompted messages to send to the remote end (or where we already returned
5526 // something in the handler for the message that prompted this message):
5528 /// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
5529 /// announceable and available for use (have exchanged [`ChannelReady`] messages in both
5530 /// directions). Should be used for both broadcasted announcements and in response to an
5531 /// AnnouncementSignatures message from the remote peer.
5533 /// Will only fail if we're not in a state where channel_announcement may be sent (including
5536 /// This will only return ChannelError::Ignore upon failure.
5538 /// [`ChannelReady`]: crate::ln::msgs::ChannelReady
5539 fn get_channel_announcement<NS: Deref>(
5540 &self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
5541 ) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5542 if !self.context.config.announced_channel {
5543 return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
5545 if !self.context.is_usable() {
5546 return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
5549 let short_channel_id = self.context.get_short_channel_id()
5550 .ok_or(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel has not been confirmed yet".to_owned()))?;
5551 let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5552 .map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
5553 let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
// Node ordering: the lexicographically-smaller node id goes in node_id_1, matching the
// gossip-announcement ordering requirement; funding keys below follow the same ordering.
5554 let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();
5556 let msg = msgs::UnsignedChannelAnnouncement {
5557 features: channelmanager::provided_channel_features(&user_config),
5560 node_id_1: if were_node_one { node_id } else { counterparty_node_id },
5561 node_id_2: if were_node_one { counterparty_node_id } else { node_id },
5562 bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }),
5563 bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }),
5564 excess_data: Vec::new(),
// Attempts to build an `announcement_signatures` message for this channel, returning `None`
// if any precondition fails (too few confirmations, channel not usable, peer disconnected,
// or signatures already sent). On success, transitions announcement_sigs_state to
// MessageSent so we don't re-send.
5570 fn get_announcement_sigs<NS: Deref, L: Deref>(
5571 &mut self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
5572 best_block_height: u32, logger: &L
5573 ) -> Option<msgs::AnnouncementSignatures>
5575 NS::Target: NodeSigner,
// Require six confirmations (conf_height + 5 <= best_block_height), mirroring the check in
// `announcement_signatures` below.
5578 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5582 if !self.context.is_usable() {
5586 if self.context.channel_state.is_peer_disconnected() {
5587 log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
5591 if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent {
5595 log_trace!(logger, "Creating an announcement_signatures message for channel {}", &self.context.channel_id());
5596 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
5599 log_trace!(logger, "{:?}", e);
5603 let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) {
5605 log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!");
// Two signatures are required: one with our node key (above) and one with the channel's
// funding key (below, via the channel signer).
5610 match &self.context.holder_signer {
5611 ChannelSignerType::Ecdsa(ecdsa) => {
5612 let our_bitcoin_sig = match ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) {
5614 log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!");
5619 let short_channel_id = match self.context.get_short_channel_id() {
5621 None => return None,
5624 self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;
5626 Some(msgs::AnnouncementSignatures {
5627 channel_id: self.context.channel_id(),
5629 node_signature: our_node_sig,
5630 bitcoin_signature: our_bitcoin_sig,
5633 // TODO (taproot|arik)
5639 /// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are
// Combines our node/bitcoin signatures with the counterparty's previously-stored
// announcement_sigs into a fully-signed `channel_announcement`, placing each signature in
// the slot matching the node ordering established in `get_channel_announcement`.
5641 fn sign_channel_announcement<NS: Deref>(
5642 &self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement
5643 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5644 if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs {
5645 let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5646 .map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?);
5647 let were_node_one = announcement.node_id_1 == our_node_key;
5649 let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement))
5650 .map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?;
5651 match &self.context.holder_signer {
5652 ChannelSignerType::Ecdsa(ecdsa) => {
5653 let our_bitcoin_sig = ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx)
5654 .map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
5655 Ok(msgs::ChannelAnnouncement {
5656 node_signature_1: if were_node_one { our_node_sig } else { their_node_sig },
5657 node_signature_2: if were_node_one { their_node_sig } else { our_node_sig },
5658 bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig },
5659 bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
5660 contents: announcement,
5663 // TODO (taproot|arik)
5668 Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
5672 /// Processes an incoming announcement_signatures message, providing a fully-signed
5673 /// channel_announcement message which we can broadcast and storing our counterparty's
5674 /// signatures for later reconstruction/rebroadcast of the channel_announcement.
5675 pub fn announcement_signatures<NS: Deref>(
5676 &mut self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32,
5677 msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
5678 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5679 let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;
// Message hash for signature verification is the double-SHA256 of the serialized
// unsigned announcement.
5681 let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);
// Bad signatures are a protocol violation and close the channel (ChannelError::Close).
5683 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
5684 return Err(ChannelError::Close(format!(
5685 "Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
5686 &announcement, self.context.get_counterparty_node_id())));
5688 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
5689 return Err(ChannelError::Close(format!(
5690 "Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
5691 &announcement, self.context.counterparty_funding_pubkey())));
// Signatures verified: store them even if we can't yet countersign, so a later
// `get_signed_channel_announcement` can complete the announcement.
5694 self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature));
5695 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5696 return Err(ChannelError::Ignore(
5697 "Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned()));
5700 self.sign_channel_announcement(node_signer, announcement)
5703 /// Gets a signed channel_announcement for this channel, if we previously received an
5704 /// announcement_signatures from our counterparty.
5705 pub fn get_signed_channel_announcement<NS: Deref>(
5706 &self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32, user_config: &UserConfig
5707 ) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
// Same six-confirmation gate as `announcement_signatures`; all failures here are
// silently mapped to `None` since this is a best-effort getter.
5708 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5711 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
5713 Err(_) => return None,
5715 match self.sign_channel_announcement(node_signer, announcement) {
5716 Ok(res) => Some(res),
5721 /// May panic if called on a channel that wasn't immediately-previously
5722 /// self.remove_uncommitted_htlcs_and_mark_paused()'d
// Builds the `channel_reestablish` message we send on reconnection, including the
// data-loss-protect fields (last revoked per-commitment secret and a dummy current point).
5723 pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
5724 assert!(self.context.channel_state.is_peer_disconnected());
5725 assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
5726 // Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
5727 // current to_remote balances. However, it no longer has any use, and thus is now simply
5728 // set to a dummy (but valid, as required by the spec) public key.
5729 // fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey"
5730 // branches, but we unwrap it below, so we arbitrarily select a dummy pubkey which is both
5731 // valid, and valid in fuzzing mode's arbitrary validity criteria:
5732 let mut pk = [2; 33]; pk[1] = 0xff;
5733 let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
// If the counterparty has revoked at least one commitment, reveal the secret for their
// last-revoked commitment (index + 2 because our counter tracks the *next* commitment).
5734 let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
5735 let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
5736 log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), &self.context.channel_id());
5739 log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", &self.context.channel_id());
5742 self.mark_awaiting_response();
5743 msgs::ChannelReestablish {
5744 channel_id: self.context.channel_id(),
5745 // The protocol has two different commitment number concepts - the "commitment
5746 // transaction number", which starts from 0 and counts up, and the "revocation key
5747 // index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
5748 // commitment transaction numbers by the index which will be used to reveal the
5749 // revocation key for that commitment transaction, which means we have to convert them
5750 // to protocol-level commitment numbers here...
5752 // next_local_commitment_number is the next commitment_signed number we expect to
5753 // receive (indicating if they need to resend one that we missed).
5754 next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
5755 // We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
5756 // receive, however we track it by the next commitment number for a remote transaction
5757 // (which is one further, as they always revoke previous commitment transaction, not
5758 // the one we send) so we have to decrement by 1. Note that if
5759 // cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
5760 // dropped this channel on disconnect as it hasn't yet reached AwaitingChannelReady so we can't
5762 next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
5763 your_last_per_commitment_secret: remote_last_secret,
5764 my_current_per_commitment_point: dummy_pubkey,
5765 // TODO(dual_funding): If we've sent `commtiment_signed` for an interactive transaction
5766 // construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
5767 // txid of that interactive transaction, else we MUST NOT set it.
5768 next_funding_txid: None,
5773 // Send stuff to our remote peers:
5775 /// Queues up an outbound HTLC to send by placing it in the holding cell. You should call
5776 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
5777 /// commitment update.
5779 /// `Err`s will only be [`ChannelError::Ignore`].
5780 pub fn queue_add_htlc<F: Deref, L: Deref>(
5781 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
5782 onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
5783 blinding_point: Option<PublicKey>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5784 ) -> Result<(), ChannelError>
5785 where F::Target: FeeEstimator, L::Target: Logger
// Delegates to `send_htlc` with force_holding_cell=true, so no UpdateAddHTLC message can be
// produced here - the debug assertions below enforce both invariants.
5788 .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true,
5789 skimmed_fee_msat, blinding_point, fee_estimator, logger)
5790 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
5792 if let ChannelError::Ignore(_) = err { /* fine */ }
5793 else { debug_assert!(false, "Queueing cannot trigger channel failure"); }
5798 /// Adds a pending outbound HTLC to this channel, note that you probably want
5799 /// [`Self::send_htlc_and_commit`] instead cause you'll want both messages at once.
5801 /// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on
5803 /// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
5804 /// wouldn't be able to determine what they actually ACK'ed if we have two sets of updates
5806 /// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as
5807 /// we may not yet have sent the previous commitment update messages and will need to
5808 /// regenerate them.
5810 /// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods
5811 /// on this [`Channel`] if `force_holding_cell` is false.
5813 /// `Err`s will only be [`ChannelError::Ignore`].
5814 fn send_htlc<F: Deref, L: Deref>(
5815 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
5816 onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool,
5817 skimmed_fee_msat: Option<u64>, blinding_point: Option<PublicKey>,
5818 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5819 ) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
5820 where F::Target: FeeEstimator, L::Target: Logger
// Validation phase: channel must be fully open and not shutting down, the amount must be
// non-zero, within the channel value, and within our current min/max outbound HTLC limits.
5822 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
5823 self.context.channel_state.is_local_shutdown_sent() ||
5824 self.context.channel_state.is_remote_shutdown_sent()
5826 return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
5828 let channel_total_msat = self.context.channel_value_satoshis * 1000;
5829 if amount_msat > channel_total_msat {
5830 return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat)));
5833 if amount_msat == 0 {
5834 return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
5837 let available_balances = self.context.get_available_balances(fee_estimator);
5838 if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
5839 return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
5840 available_balances.next_outbound_htlc_minimum_msat)));
5843 if amount_msat > available_balances.next_outbound_htlc_limit_msat {
5844 return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
5845 available_balances.next_outbound_htlc_limit_msat)));
5848 if self.context.channel_state.is_peer_disconnected() {
5849 // Note that this should never really happen, if we're !is_live() on receipt of an
5850 // incoming HTLC for relay will result in us rejecting the HTLC and we won't allow
5851 // the user to send directly into a !is_live() channel. However, if we
5852 // disconnected during the time the previous hop was doing the commitment dance we may
5853 // end up getting here after the forwarding delay. In any case, returning an
5854 // IgnoreError will get ChannelManager to do the right thing and fail backwards now.
5855 return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
// If channel state forces the holding cell (awaiting RAA or a monitor update), upgrade the
// caller's request to a holding-cell add even if they asked for a direct send.
5858 let need_holding_cell = self.context.channel_state.should_force_holding_cell();
5859 log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}",
5860 payment_hash, amount_msat,
5861 if force_holding_cell { "into holding cell" }
5862 else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
5863 else { "to peer" });
5865 if need_holding_cell {
5866 force_holding_cell = true;
5869 // Now update local state:
5870 if force_holding_cell {
5871 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
5876 onion_routing_packet,
// Direct-send path: record the pending outbound HTLC as LocalAnnounced and build the
// UpdateAddHTLC message for the peer.
5883 self.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
5884 htlc_id: self.context.next_holder_htlc_id,
5886 payment_hash: payment_hash.clone(),
5888 state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
5894 let res = msgs::UpdateAddHTLC {
5895 channel_id: self.context.channel_id,
5896 htlc_id: self.context.next_holder_htlc_id,
5900 onion_routing_packet,
// HTLC id counter advances on every successful add (holding cell or direct).
5904 self.context.next_holder_htlc_id += 1;
// Builds a new counterparty commitment transaction, promoting pending HTLC and fee-update
// states, and returns the ChannelMonitorUpdate that persists the new commitment info.
// Unlike `build_commitment_no_state_update`, this mutates channel state (hence the name:
// it skips only the *status* checks, not the state update).
5909 fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
5910 log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
5911 // We can upgrade the status of some HTLCs that are waiting on a commitment, even if we
5912 // fail to generate this, we still are at least at a position where upgrading their status
5914 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
5915 let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state {
5916 Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
5918 if let Some(state) = new_state {
5919 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
5923 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
5924 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
5925 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
5926 // Grab the preimage, if it exists, instead of cloning
5927 let mut reason = OutboundHTLCOutcome::Success(None);
5928 mem::swap(outcome, &mut reason);
5929 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
// An inbound fee update awaiting our commitment is now committed; only the non-funder
// (inbound) side can be in this state, per the debug_assert.
5932 if let Some((feerate, update_state)) = self.context.pending_update_fee {
5933 if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
5934 debug_assert!(!self.context.is_outbound());
5935 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
5936 self.context.feerate_per_kw = feerate;
5937 self.context.pending_update_fee = None;
5940 self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst;
5942 let (mut htlcs_ref, counterparty_commitment_tx) =
5943 self.build_commitment_no_state_update(logger);
5944 let counterparty_commitment_txid = counterparty_commitment_tx.trust().txid();
5945 let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
5946 htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();
5948 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent {
5949 self.context.announcement_sigs_state = AnnouncementSigsState::Committed;
// Persist the new counterparty commitment to the ChannelMonitor via a monotonically
// increasing update id.
5952 self.context.latest_monitor_update_id += 1;
5953 let monitor_update = ChannelMonitorUpdate {
5954 update_id: self.context.latest_monitor_update_id,
5955 counterparty_node_id: Some(self.context.counterparty_node_id),
5956 updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
5957 commitment_txid: counterparty_commitment_txid,
5958 htlc_outputs: htlcs.clone(),
5959 commitment_number: self.context.cur_counterparty_commitment_transaction_number,
5960 their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap(),
5961 feerate_per_kw: Some(counterparty_commitment_tx.feerate_per_kw()),
5962 to_broadcaster_value_sat: Some(counterparty_commitment_tx.to_broadcaster_value_sat()),
5963 to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()),
5966 self.context.channel_state.set_awaiting_remote_revoke();
// Builds the counterparty's next commitment transaction without mutating channel state
// (takes `&self`). Returns the included HTLCs (with their sources) and the transaction.
5970 fn build_commitment_no_state_update<L: Deref>(&self, logger: &L)
5971 -> (Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>, CommitmentTransaction)
5972 where L::Target: Logger
5974 let counterparty_keys = self.context.build_remote_transaction_keys();
5975 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
5976 let counterparty_commitment_tx = commitment_stats.tx;
// Test/fuzz-only cross-check: if a remote commitment fee was pre-computed with matching
// HTLC counts and feerate, assert the actual fee matches the cached projection.
5978 #[cfg(any(test, fuzzing))]
5980 if !self.context.is_outbound() {
5981 let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
5982 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
5983 if let Some(info) = projected_commit_tx_info {
5984 let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len();
5985 if info.total_pending_htlcs == total_pending_htlcs
5986 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
5987 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
5988 && info.feerate == self.context.feerate_per_kw {
5989 let actual_fee = commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.get_channel_type());
5990 assert_eq!(actual_fee, info.fee);
5996 (commitment_stats.htlcs_included, counterparty_commitment_tx)
5999 /// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
6000 /// generation when we shouldn't change HTLC/channel state.
6001 fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
6002 // Get the fee tests from `build_commitment_no_state_update`
6003 #[cfg(any(test, fuzzing))]
6004 self.build_commitment_no_state_update(logger);
6006 let counterparty_keys = self.context.build_remote_transaction_keys();
6007 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
6008 let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
6010 match &self.context.holder_signer {
6011 ChannelSignerType::Ecdsa(ecdsa) => {
6012 let (signature, htlc_signatures);
6015 let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
6016 for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
// The signer returns the commitment signature plus one signature per HTLC output.
6020 let res = ecdsa.sign_counterparty_commitment(
6021 &commitment_stats.tx,
6022 commitment_stats.inbound_htlc_preimages,
6023 commitment_stats.outbound_htlc_preimages,
6024 &self.context.secp_ctx,
6025 ).map_err(|_| ChannelError::Ignore("Failed to get signatures for new commitment_signed".to_owned()))?;
6027 htlc_signatures = res.1;
6029 log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
6030 encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
6031 &counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
6032 log_bytes!(signature.serialize_compact()[..]), &self.context.channel_id());
// Trace each HTLC signature against the reconstructed HTLC transaction it signs.
6034 for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
6035 log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
6036 encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, &self.context.channel_type, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
6037 encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &counterparty_keys)),
6038 log_bytes!(counterparty_keys.broadcaster_htlc_key.to_public_key().serialize()),
6039 log_bytes!(htlc_sig.serialize_compact()[..]), &self.context.channel_id());
6043 Ok((msgs::CommitmentSigned {
6044 channel_id: self.context.channel_id,
6048 partial_signature_with_nonce: None,
6049 }, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
6051 // TODO (taproot|arik)
6057 /// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
6058 /// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
6060 /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
6061 /// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
6062 pub fn send_htlc_and_commit<F: Deref, L: Deref>(
6063 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32,
6064 source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
6065 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
6066 ) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
6067 where F::Target: FeeEstimator, L::Target: Logger
// force_holding_cell=false and blinding_point=None; any non-Ignore error would indicate a
// bug, per the debug_assert below.
6069 let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
6070 onion_routing_packet, false, skimmed_fee_msat, None, fee_estimator, logger);
6071 if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
// Build the commitment, pause monitor-update-dependent progress, and hand the (possibly
// blocked) monitor update to the caller.
6074 let monitor_update = self.build_commitment_no_status_check(logger);
6075 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
6076 Ok(self.push_ret_blockable_mon_update(monitor_update))
6082 /// Applies the `ChannelUpdate` and returns a boolean indicating whether a change actually
// Stores the counterparty's latest forwarding parameters (fees and cltv delta) from their
// channel_update, comparing against the previous value to report whether anything changed.
6084 pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<bool, ChannelError> {
6085 let new_forwarding_info = Some(CounterpartyForwardingInfo {
6086 fee_base_msat: msg.contents.fee_base_msat,
6087 fee_proportional_millionths: msg.contents.fee_proportional_millionths,
6088 cltv_expiry_delta: msg.contents.cltv_expiry_delta
6090 let did_change = self.context.counterparty_forwarding_info != new_forwarding_info;
6092 self.context.counterparty_forwarding_info = new_forwarding_info;
6098 /// Begins the shutdown process, getting a message for the remote peer and returning all
6099 /// holding cell HTLCs for payment failure.
// Returns the `Shutdown` message to send, an optional `ChannelMonitorUpdate` recording the
// shutdown script (only when a script was newly committed here), and the (source, hash)
// pairs of holding-cell HTLCs that are being dropped so the caller can fail them back.
6100 pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures,
6101 target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
6102 -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
// Refuse to start shutdown while any outbound HTLC is still only locally announced
// (i.e. not yet irrevocably committed by the counterparty).
6104 for htlc in self.context.pending_outbound_htlcs.iter() {
6105 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
6106 return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
// Reject duplicate shutdown attempts, whether we or the remote initiated.
6109 if self.context.channel_state.is_local_shutdown_sent() {
6110 return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
6112 else if self.context.channel_state.is_remote_shutdown_sent() {
6113 return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
// A shutdown script, once set, must not be replaced by an override.
6115 if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
6116 return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
6118 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
6119 if self.context.channel_state.is_peer_disconnected() || self.context.channel_state.is_monitor_update_in_progress() {
6120 return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
// Determine the script to close to; if none was committed up-front we pick one now
// (override first, then the signer) and remember that a monitor update is needed.
6123 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
6126 // use override shutdown script if provided
6127 let shutdown_scriptpubkey = match override_shutdown_script {
6128 Some(script) => script,
6130 // otherwise, use the shutdown scriptpubkey provided by the signer
6131 match signer_provider.get_shutdown_scriptpubkey() {
6132 Ok(scriptpubkey) => scriptpubkey,
6133 Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
// The chosen script must be acceptable under the peer's advertised features.
6137 if !shutdown_scriptpubkey.is_compatible(their_features) {
6138 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
6140 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
6145 // From here on out, we may not fail!
6146 self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
6147 self.context.channel_state.set_local_shutdown_sent();
6148 self.context.update_time_counter += 1;
// Persist the newly-chosen shutdown script to the channel monitor if needed.
6150 let monitor_update = if update_shutdown_script {
6151 self.context.latest_monitor_update_id += 1;
6152 let monitor_update = ChannelMonitorUpdate {
6153 update_id: self.context.latest_monitor_update_id,
6154 counterparty_node_id: Some(self.context.counterparty_node_id),
6155 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
6156 scriptpubkey: self.get_closing_scriptpubkey(),
6159 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
6160 self.push_ret_blockable_mon_update(monitor_update)
6162 let shutdown = msgs::Shutdown {
6163 channel_id: self.context.channel_id,
6164 scriptpubkey: self.get_closing_scriptpubkey(),
6167 // Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
6168 // our shutdown until we've committed all of the pending changes.
6169 self.context.holding_cell_update_fee = None;
6170 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
// Collect the sources/hashes of queued outbound adds so callers can fail them back.
6171 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
6173 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
6174 dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
6181 debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
6182 "we can't both complete shutdown and return a monitor update");
6184 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
// Iterates over the (source, payment_hash) pairs of all outbound HTLCs in flight on this
// channel: queued `AddHTLC` updates still in the holding cell, chained with the HTLCs in
// `pending_outbound_htlcs`.
6187 pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
6188 self.context.holding_cell_htlc_updates.iter()
6189 .flat_map(|htlc_update| {
// Only `AddHTLC` holding-cell entries carry an outbound source/hash pair.
6191 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
6192 => Some((source, payment_hash)),
6196 .chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
6200 /// A not-yet-funded outbound (from holder) channel using V1 channel establishment.
6201 pub(super) struct OutboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
// Shared channel state/configuration used across all channel phases.
6202 pub context: ChannelContext<SP>,
// State specific to channels that have not yet been funded (e.g. age tracking).
6203 pub unfunded_context: UnfundedChannelContext,
6206 impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
// Constructs a new outbound (holder-initiated) V1 channel in the initial negotiating
// state. Validates funding amount, push amount, contest delay and reserve against both
// protocol limits and the user's `config`, derives the channel keys/signer, and fills in
// the `ChannelContext` with pre-handshake defaults (counterparty fields are zeroed or
// `None` until `accept_channel` is processed).
6207 pub fn new<ES: Deref, F: Deref>(
6208 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
6209 channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
6210 outbound_scid_alias: u64, temporary_channel_id: Option<ChannelId>
6211 ) -> Result<OutboundV1Channel<SP>, APIError>
6212 where ES::Target: EntropySource,
6213 F::Target: FeeEstimator
6215 let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
// `false` here indicates this is not an inbound channel for key derivation purposes.
6216 let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
6217 let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
6218 let pubkeys = holder_signer.pubkeys().clone();
// Without the wumbo feature the peer cannot accept large channels.
6220 if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
6221 return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
6223 if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
6224 return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
6226 let channel_value_msat = channel_value_satoshis * 1000;
// Cannot push more to the counterparty than the channel holds.
6227 if push_msat > channel_value_msat {
6228 return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
// Too-short contest delay would let a cheating counterparty claim funds quickly.
6230 if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
6231 return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks", holder_selected_contest_delay)});
6233 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
6234 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6235 // Protocol level safety check in place, although it should never happen because
6236 // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
6237 return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below implemention limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
6240 let channel_type = Self::get_initial_channel_type(&config, their_features);
6241 debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
// Anchor channels reserve the value of both anchor outputs and use a distinct fee target.
6243 let (commitment_conf_target, anchor_outputs_value_msat) = if channel_type.supports_anchors_zero_fee_htlc_tx() {
6244 (ConfirmationTarget::AnchorChannelFee, ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000)
6246 (ConfirmationTarget::NonAnchorChannelFee, 0)
6248 let commitment_feerate = fee_estimator.bounded_sat_per_1000_weight(commitment_conf_target);
// Ensure our retained balance can at least cover the initial commitment tx fee.
6250 let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
6251 let commitment_tx_fee = commit_tx_fee_msat(commitment_feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
6252 if value_to_self_msat.saturating_sub(anchor_outputs_value_msat) < commitment_tx_fee {
6253 return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
// Randomize the secp context to harden against side-channel attacks.
6256 let mut secp_ctx = Secp256k1::new();
6257 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
// Optionally commit to an upfront shutdown script, per user config.
6259 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
6260 match signer_provider.get_shutdown_scriptpubkey() {
6261 Ok(scriptpubkey) => Some(scriptpubkey),
6262 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
6266 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
6267 if !shutdown_scriptpubkey.is_compatible(&their_features) {
6268 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
6272 let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
6273 Ok(script) => script,
6274 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
// Use the caller-provided temporary id, or derive a fresh random one.
6277 let temporary_channel_id = temporary_channel_id.unwrap_or_else(|| ChannelId::temporary_from_entropy_source(entropy_source));
6280 context: ChannelContext {
6283 config: LegacyChannelConfig {
6284 options: config.channel_config.clone(),
6285 announced_channel: config.channel_handshake_config.announced_channel,
6286 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
6291 inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),
// Until funding, the channel id is the temporary one.
6293 channel_id: temporary_channel_id,
6294 temporary_channel_id: Some(temporary_channel_id),
6295 channel_state: ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT),
6296 announcement_sigs_state: AnnouncementSigsState::NotSent,
6298 channel_value_satoshis,
6300 latest_monitor_update_id: 0,
6302 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
6303 shutdown_scriptpubkey,
// Commitment numbers count down from INITIAL_COMMITMENT_NUMBER.
6306 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6307 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6310 pending_inbound_htlcs: Vec::new(),
6311 pending_outbound_htlcs: Vec::new(),
6312 holding_cell_htlc_updates: Vec::new(),
6313 pending_update_fee: None,
6314 holding_cell_update_fee: None,
6315 next_holder_htlc_id: 0,
6316 next_counterparty_htlc_id: 0,
6317 update_time_counter: 1,
6319 resend_order: RAACommitmentOrder::CommitmentFirst,
6321 monitor_pending_channel_ready: false,
6322 monitor_pending_revoke_and_ack: false,
6323 monitor_pending_commitment_signed: false,
6324 monitor_pending_forwards: Vec::new(),
6325 monitor_pending_failures: Vec::new(),
6326 monitor_pending_finalized_fulfills: Vec::new(),
6328 signer_pending_commitment_update: false,
6329 signer_pending_funding: false,
// Debug-only tracking of the max commitment-tx output values we've seen.
6331 #[cfg(debug_assertions)]
6332 holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
6333 #[cfg(debug_assertions)]
6334 counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
6336 last_sent_closing_fee: None,
6337 pending_counterparty_closing_signed: None,
6338 expecting_peer_commitment_signed: false,
6339 closing_fee_limits: None,
6340 target_closing_feerate_sats_per_kw: None,
6342 funding_tx_confirmed_in: None,
6343 funding_tx_confirmation_height: 0,
6344 short_channel_id: None,
6345 channel_creation_height: current_chain_height,
6347 feerate_per_kw: commitment_feerate,
// Counterparty limits start at 0/None and are filled in by accept_channel.
6348 counterparty_dust_limit_satoshis: 0,
6349 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
6350 counterparty_max_htlc_value_in_flight_msat: 0,
6351 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
6352 counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel
6353 holder_selected_channel_reserve_satoshis,
6354 counterparty_htlc_minimum_msat: 0,
// A zero htlc-minimum config is bumped to 1 msat so we never accept zero-value HTLCs.
6355 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
6356 counterparty_max_accepted_htlcs: 0,
6357 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
6358 minimum_depth: None, // Filled in in accept_channel
6360 counterparty_forwarding_info: None,
6362 channel_transaction_parameters: ChannelTransactionParameters {
6363 holder_pubkeys: pubkeys,
6364 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
6365 is_outbound_from_holder: true,
6366 counterparty_parameters: None,
6367 funding_outpoint: None,
6368 channel_type_features: channel_type.clone()
6370 funding_transaction: None,
6371 is_batch_funding: None,
6373 counterparty_cur_commitment_point: None,
6374 counterparty_prev_commitment_point: None,
6375 counterparty_node_id,
6377 counterparty_shutdown_scriptpubkey: None,
6379 commitment_secrets: CounterpartyCommitmentSecrets::new(),
6381 channel_update_status: ChannelUpdateStatus::Enabled,
6382 closing_signed_in_flight: false,
6384 announcement_sigs: None,
6386 #[cfg(any(test, fuzzing))]
6387 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
6388 #[cfg(any(test, fuzzing))]
6389 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
6391 workaround_lnd_bug_4006: None,
6392 sent_message_awaiting_response: None,
6394 latest_inbound_scid_alias: None,
6395 outbound_scid_alias,
6397 channel_pending_event_emitted: false,
6398 channel_ready_event_emitted: false,
6400 #[cfg(any(test, fuzzing))]
6401 historical_inbound_htlc_fulfills: HashSet::new(),
6406 blocked_monitor_updates: Vec::new(),
6408 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
6412 /// Only allowed after [`ChannelContext::channel_transaction_parameters`] is set.
// Builds and signs the counterparty's initial commitment transaction and wraps the
// signature into a `FundingCreated` message. Returns `None` when the signer cannot yet
// produce the signature (the `.ok()?` below) — presumably the async-signing path; confirm
// against the callers that handle `None`.
6413 fn get_funding_created_msg<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
6414 let counterparty_keys = self.context.build_remote_transaction_keys();
6415 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
6416 let signature = match &self.context.holder_signer {
6417 // TODO (taproot|arik): move match into calling method for Taproot
6418 ChannelSignerType::Ecdsa(ecdsa) => {
// A signer error here propagates as `None` via `.ok()?`.
6419 ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.context.secp_ctx)
6420 .map(|(sig, _)| sig).ok()?
6422 // TODO (taproot|arik)
// The signature is now available, so any pending-funding flag can be cleared.
6427 if self.context.signer_pending_funding {
6428 log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding");
6429 self.context.signer_pending_funding = false;
6432 Some(msgs::FundingCreated {
// NOTE(review): these unwraps assume the temporary id and funding outpoint are set —
// consistent with the doc comment requiring channel_transaction_parameters to be set.
6433 temporary_channel_id: self.context.temporary_channel_id.unwrap(),
6434 funding_txid: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().txid,
6435 funding_output_index: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().index,
6438 partial_signature_with_nonce: None,
6440 next_local_nonce: None,
6444 /// Updates channel state with knowledge of the funding transaction's txid/index, and generates
6445 /// a funding_created message for the remote peer.
6446 /// Panics if called at some time other than immediately after initial handshake, if called twice,
6447 /// or if called on an inbound channel.
6448 /// Note that channel_id changes during this call!
6449 /// Do NOT broadcast the funding transaction until after a successful funding_signed call!
6450 /// If an Err is returned, it is a ChannelError::Close.
6451 pub fn get_funding_created<L: Deref>(&mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
6452 -> Result<Option<msgs::FundingCreated>, (Self, ChannelError)> where L::Target: Logger {
// Outbound-only: funding_created is always sent by the channel initiator.
6453 if !self.context.is_outbound() {
6454 panic!("Tried to create outbound funding_created message on an inbound channel!");
// Must be exactly at the point where both sides' init has been exchanged.
6457 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
6458 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
6460 panic!("Tried to get a funding_created messsage at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
// Commitment numbers and secrets must still be at their pre-funding initial values.
6462 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
6463 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
6464 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6465 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
6468 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
6469 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
6471 // Now that we're past error-generating stuff, update our local state:
6473 self.context.channel_state = ChannelState::FundingNegotiated;
// The channel id becomes funding-outpoint-derived from here on.
6474 self.context.channel_id = funding_txo.to_channel_id();
6476 // If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
6477 // We can skip this if it is a zero-conf channel.
6478 if funding_transaction.is_coin_base() &&
6479 self.context.minimum_depth.unwrap_or(0) > 0 &&
6480 self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
6481 self.context.minimum_depth = Some(COINBASE_MATURITY);
6484 self.context.funding_transaction = Some(funding_transaction);
// `Some(())` iff this channel is part of a batch funding transaction.
6485 self.context.is_batch_funding = Some(()).filter(|_| is_batch_funding);
6487 let funding_created = self.get_funding_created_msg(logger);
6488 if funding_created.is_none() {
// Without async signing support, a missing signature is a hard failure.
6489 #[cfg(not(async_signing))] {
6490 panic!("Failed to get signature for new funding creation");
// With async signing, record that we are waiting on the signer and retry later.
6492 #[cfg(async_signing)] {
6493 if !self.context.signer_pending_funding {
6494 log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
6495 self.context.signer_pending_funding = true;
// Computes the most-preferred channel type to propose, based on our config and the
// counterparty's advertised features.
6503 fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
6504 // The default channel type (ie the first one we try) depends on whether the channel is
6505 // public - if it is, we just go with `only_static_remotekey` as it's the only option
6506 // available. If it's private, we first try `scid_privacy` as it provides better privacy
6507 // with no other changes, and fall back to `only_static_remotekey`.
6508 let mut ret = ChannelTypeFeatures::only_static_remote_key();
// scid_privacy is only proposed for unannounced channels, when configured and supported.
6509 if !config.channel_handshake_config.announced_channel &&
6510 config.channel_handshake_config.negotiate_scid_privacy &&
6511 their_features.supports_scid_privacy() {
6512 ret.set_scid_privacy_required();
6515 // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
6516 // set it now. If they don't understand it, we'll fall back to our default of
6517 // `only_static_remotekey`.
6518 if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
6519 their_features.supports_anchors_zero_fee_htlc_tx() {
6520 ret.set_anchors_zero_fee_htlc_tx_required();
6526 /// If we receive an error message, it may only be a rejection of the channel type we tried,
6527 /// not of our ability to open any channel at all. Thus, on error, we should first call this
6528 /// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
// Downgrades the proposed channel type one feature at a time (anchors first, then
// scid_privacy) and produces a fresh `open_channel` to retry with; errors (`Err(())`)
// when we have already fallen back to the plain `static_remote_key` type.
6529 pub(crate) fn maybe_handle_error_without_close<F: Deref>(
6530 &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F>
6531 ) -> Result<msgs::OpenChannel, ()>
6533 F::Target: FeeEstimator
// Only an outbound channel that has merely sent its init can be retried this way.
6535 if !self.context.is_outbound() ||
6537 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
6538 if flags == NegotiatingFundingFlags::OUR_INIT_SENT
6543 if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() {
6544 // We've exhausted our options
6547 // We support opening a few different types of channels. Try removing our additional
6548 // features one by one until we've either arrived at our default or the counterparty has
6551 // Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
6552 // counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
6553 // checks whether the counterparty supports every feature, this would only happen if the
6554 // counterparty is advertising the feature, but rejecting channels proposing the feature for
6556 if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
6557 self.context.channel_type.clear_anchors_zero_fee_htlc_tx();
// Dropping anchors changes the fee target, so re-fetch the commitment feerate.
6558 self.context.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
6559 assert!(!self.context.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
6560 } else if self.context.channel_type.supports_scid_privacy() {
6561 self.context.channel_type.clear_scid_privacy();
6563 self.context.channel_type = ChannelTypeFeatures::only_static_remote_key();
// Keep the transaction parameters' feature set in sync with the downgraded type.
6565 self.context.channel_transaction_parameters.channel_type_features = self.context.channel_type.clone();
6566 Ok(self.get_open_channel(chain_hash))
// Builds the `open_channel` message from the current channel context. Panics when called
// on an inbound channel, after the handshake has progressed, or after the commitment
// number has advanced.
6569 pub fn get_open_channel(&self, chain_hash: ChainHash) -> msgs::OpenChannel {
6570 if !self.context.is_outbound() {
6571 panic!("Tried to open a channel for an inbound channel?");
6573 if self.context.have_received_message() {
6574 panic!("Cannot generate an open_channel after we've moved forward");
6577 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6578 panic!("Tried to send an open_channel for a channel that has already advanced");
6581 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
6582 let keys = self.context.get_holder_pubkeys();
6586 temporary_channel_id: self.context.channel_id,
6587 funding_satoshis: self.context.channel_value_satoshis,
// push_msat is the portion of the channel value not retained for ourselves.
6588 push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat,
6589 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
6590 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
6591 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
6592 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
6593 feerate_per_kw: self.context.feerate_per_kw as u32,
6594 to_self_delay: self.context.get_holder_selected_contest_delay(),
6595 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
6596 funding_pubkey: keys.funding_pubkey,
6597 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
6598 payment_point: keys.payment_point,
6599 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
6600 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
6601 first_per_commitment_point,
// Bit 0 of channel_flags signals whether the channel should be announced.
6602 channel_flags: if self.context.config.announced_channel {1} else {0},
// An empty script here signals upfront-shutdown opt-out.
6603 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
6604 Some(script) => script.clone().into_inner(),
6605 None => Builder::new().into_script(),
6607 channel_type: Some(self.context.channel_type.clone()),
// Handles the counterparty's `accept_channel` response: validates every field against
// protocol rules and our (possibly overridden) handshake limits, then stores the
// counterparty's parameters and advances the negotiating state.
6612 pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
// Limits captured at channel creation take precedence over the current defaults.
6613 let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };
6615 // Check sanity of message fields:
6616 if !self.context.is_outbound() {
6617 return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
// Only valid when exactly our init has been sent and nothing further has happened.
6619 if !matches!(self.context.channel_state, ChannelState::NegotiatingFunding(flags) if flags == NegotiatingFundingFlags::OUR_INIT_SENT) {
6620 return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
// 21M BTC in satoshis: a dust limit above the money supply is nonsensical.
6622 if msg.dust_limit_satoshis > 21000000 * 100000000 {
6623 return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.dust_limit_satoshis)));
6625 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
6626 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
6628 if msg.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
6629 return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
6631 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
6632 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
6633 msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
// An htlc_minimum at or above the spendable channel value makes the channel useless.
6635 let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
6636 if msg.htlc_minimum_msat >= full_channel_value_msat {
6637 return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
6639 let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
6640 if msg.to_self_delay > max_delay_acceptable {
6641 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.to_self_delay)));
6643 if msg.max_accepted_htlcs < 1 {
6644 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
6646 if msg.max_accepted_htlcs > MAX_HTLCS {
6647 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
6650 // Now check against optional parameters as set by config...
6651 if msg.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
6652 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
6654 if msg.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
6655 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
6657 if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
6658 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
6660 if msg.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
6661 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
6663 if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6664 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6666 if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
6667 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
6669 if msg.minimum_depth > peer_limits.max_minimum_depth {
6670 return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.minimum_depth)));
// An explicit channel_type in the response must echo the one we proposed.
6673 if let Some(ty) = &msg.channel_type {
6674 if *ty != self.context.channel_type {
6675 return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
6677 } else if their_features.supports_channel_type() {
6678 // Assume they've accepted the channel type as they said they understand it.
// No explicit type and no channel_type support: only the legacy implicit type is allowed.
6680 let channel_type = ChannelTypeFeatures::from_init(&their_features);
6681 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
6682 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
6684 self.context.channel_type = channel_type.clone();
6685 self.context.channel_transaction_parameters.channel_type_features = channel_type;
6688 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
6689 match &msg.shutdown_scriptpubkey {
6690 &Some(ref script) => {
6691 // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything
6692 if script.len() == 0 {
// Non-empty script must be a BOLT2-acceptable shutdown script form.
6695 if !script::is_bolt2_compliant(&script, their_features) {
6696 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
6698 Some(script.clone())
6701 // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
6703 return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
// All checks passed: record the counterparty's negotiated parameters.
6708 self.context.counterparty_dust_limit_satoshis = msg.dust_limit_satoshis;
// Cap the in-flight limit at the channel value; the peer cannot offer more than that.
6709 self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
6710 self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
6711 self.context.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat;
6712 self.context.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs;
// If we trust our own funding at 0-conf, honor their minimum_depth as-is; otherwise
// require at least one confirmation.
6714 if peer_limits.trust_own_funding_0conf {
6715 self.context.minimum_depth = Some(msg.minimum_depth);
6717 self.context.minimum_depth = Some(cmp::max(1, msg.minimum_depth));
6720 let counterparty_pubkeys = ChannelPublicKeys {
6721 funding_pubkey: msg.funding_pubkey,
6722 revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
6723 payment_point: msg.payment_point,
6724 delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
6725 htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
6728 self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
6729 selected_contest_delay: msg.to_self_delay,
6730 pubkeys: counterparty_pubkeys,
6733 self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point);
6734 self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
// Both inits are now exchanged; handshake limits no longer need enforcing.
6736 self.context.channel_state = ChannelState::NegotiatingFunding(
6737 NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
6739 self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
/// Handles a funding_signed message from the remote end.
/// If this call is successful, broadcast the funding transaction (and not before!)
///
/// On success, `self` is consumed and promoted to a full [`Channel`], returned together with
/// the freshly-built `ChannelMonitor` that must be persisted before broadcasting the funding
/// transaction. On failure, `self` is handed back alongside the error so the caller can decide
/// how to unwind.
pub fn funding_signed<L: Deref>(
	mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
) -> Result<(Channel<SP>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (OutboundV1Channel<SP>, ChannelError)>
	// funding_signed is only ever valid on a channel we initiated (i.e. we sent funding_created).
	if !self.context.is_outbound() {
		return Err((self, ChannelError::Close("Received funding_signed for an inbound channel?".to_owned())));
	if !matches!(self.context.channel_state, ChannelState::FundingNegotiated) {
		return Err((self, ChannelError::Close("Received funding_signed in strange state!".to_owned())));
	// Sanity: no revocation secrets can have been seen and both commitment numbers must still
	// be at their initial values -- anything else means state advanced before funding completed.
	if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
		self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
		self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
		panic!("Should not have advanced channel commitment tx numbers prior to funding_created");

	let funding_script = self.context.get_funding_redeemscript();

	// Rebuild the counterparty's initial commitment transaction; the monitor is seeded with it
	// below so it can react if that transaction ever confirms.
	let counterparty_keys = self.context.build_remote_transaction_keys();
	let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
	let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
	let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();

	log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
		&self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));

	// Build our own initial commitment transaction and check the peer's signature over it.
	let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
	let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
	let trusted_tx = initial_commitment_tx.trust();
	let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
	let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
	// They sign our commitment transaction, allowing us to broadcast the tx if we wish.
	if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
		return Err((self, ChannelError::Close("Invalid funding_signed signature from peer".to_owned())));

	let holder_commitment_tx = HolderCommitmentTransaction::new(
		initial_commitment_tx,
		&self.context.get_holder_pubkeys().funding_pubkey,
		self.context.counterparty_funding_pubkey()

	// Give the signer a chance to reject the commitment before we commit to it.
	self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new());
	if validated.is_err() {
		return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));

	let funding_redeemscript = self.context.get_funding_redeemscript();
	let funding_txo = self.context.get_funding_txo().unwrap();
	let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
	let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
	let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
	// A dedicated signer instance is derived for the monitor so it can operate independently of
	// the channel's own signer.
	let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
	monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
	let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
		shutdown_script, self.context.get_holder_selected_contest_delay(),
		&self.context.destination_script, (funding_txo, funding_txo_script),
		&self.context.channel_transaction_parameters,
		funding_redeemscript.clone(), self.context.channel_value_satoshis,
		holder_commitment_tx, best_block, self.context.counterparty_node_id);
	// Seed the monitor with the counterparty's initial commitment transaction.
	channel_monitor.provide_initial_counterparty_commitment_tx(
		counterparty_initial_bitcoin_tx.txid, Vec::new(),
		self.context.cur_counterparty_commitment_transaction_number,
		self.context.counterparty_cur_commitment_point.unwrap(),
		counterparty_initial_commitment_tx.feerate_per_kw(),
		counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
		counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);

	assert!(!self.context.channel_state.is_monitor_update_in_progress()); // We have not had any monitors yet to fail an update!
	// Batch-funded channels must wait for the whole batch before becoming ready.
	if self.context.is_batch_funding() {
		self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH);
		self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
	// Advance both commitment numbers past the initial commitments just exchanged.
	self.context.cur_holder_commitment_transaction_number -= 1;
	self.context.cur_counterparty_commitment_transaction_number -= 1;

	log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());

	// Promote to a full-fledged Channel now that funding negotiation is complete.
	let mut channel = Channel { context: self.context };

	let need_channel_ready = channel.check_get_channel_ready(0).is_some();
	channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
	Ok((channel, channel_monitor))
/// Indicates that the signer may have some signatures for us, so we should retry if we're
/// blocked waiting on one (returning the `funding_created` message we can now produce, if any).
#[cfg(async_signing)]
pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
	// Only outbound channels with a pending funding signature have anything to retry here.
	if self.context.signer_pending_funding && self.context.is_outbound() {
		log_trace!(logger, "Signer unblocked a funding_created");
		self.get_funding_created_msg(logger)
/// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
pub(super) struct InboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
	/// State common to both funded and unfunded channels.
	pub context: ChannelContext<SP>,
	/// State specific to not-yet-funded channels (e.g. the channel-age tick counter).
	pub unfunded_context: UnfundedChannelContext,
/// Fetches the [`ChannelTypeFeatures`] that will be used for a channel built from a given
/// [`msgs::OpenChannel`].
///
/// Returns a `ChannelError::Close` if the peer's explicitly-negotiated channel type is
/// malformed or unsupported, or (absent an explicit type) if the type implied by the peer's
/// init features is anything other than plain `static_remote_key`.
pub(super) fn channel_type_from_open_channel(
	msg: &msgs::OpenChannel, their_features: &InitFeatures,
	our_supported_features: &ChannelTypeFeatures
) -> Result<ChannelTypeFeatures, ChannelError> {
	if let Some(channel_type) = &msg.channel_type {
		// The channel_type must only set required (even) feature bits.
		if channel_type.supports_any_optional_bits() {
			return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
		// We only support the channel types defined by the `ChannelManager` in
		// `provided_channel_type_features`. The channel type must always support
		// `static_remote_key`.
		if !channel_type.requires_static_remote_key() {
			return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
		// Make sure we support all of the features behind the channel type.
		if !channel_type.is_subset(our_supported_features) {
			return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
		// Bit 0 of channel_flags is the "announce channel" flag.
		let announced_channel = if (msg.channel_flags & 1) == 1 { true } else { false };
		// scid_privacy is incompatible with publicly announcing the channel.
		if channel_type.requires_scid_privacy() && announced_channel {
			return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
		Ok(channel_type.clone())
		// No explicit channel_type: derive one from the peer's init features, but only accept
		// the plain static_remote_key type in that case.
		let channel_type = ChannelTypeFeatures::from_init(&their_features);
		if channel_type != ChannelTypeFeatures::only_static_remote_key() {
			return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
6892 impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
/// Creates a new channel from a remote sides' request for one.
/// Assumes chain_hash has already been checked and corresponds with what we expect!
///
/// Validates every field of the peer's `open_channel` message against protocol-level sanity
/// limits and the user's configured handshake limits, derives our keys for the channel, and
/// builds the initial (unfunded, inbound) channel state. Returns `ChannelError::Close` on any
/// validation failure.
pub fn new<ES: Deref, F: Deref, L: Deref>(
	fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
	counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
	their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
	current_chain_height: u32, logger: &L, is_0conf: bool,
) -> Result<InboundV1Channel<SP>, ChannelError>
	where ES::Target: EntropySource,
		F::Target: FeeEstimator,
	let logger = WithContext::from(logger, Some(counterparty_node_id), Some(msg.temporary_channel_id));
	// Bit 0 of channel_flags is the "announce channel" flag.
	let announced_channel = if (msg.channel_flags & 1) == 1 { true } else { false };

	// First check the channel type is known, failing before we do anything else if we don't
	// support this channel type.
	let channel_type = channel_type_from_open_channel(msg, their_features, our_supported_features)?;

	// Derive our signer and public keys for this channel (inbound => `true`).
	let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id);
	let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id);
	let pubkeys = holder_signer.pubkeys().clone();
	let counterparty_pubkeys = ChannelPublicKeys {
		funding_pubkey: msg.funding_pubkey,
		revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
		payment_point: msg.payment_point,
		delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
		htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)

	if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
		return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));

	// Check sanity of message fields:
	if msg.funding_satoshis > config.channel_handshake_limits.max_funding_satoshis {
		return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.channel_handshake_limits.max_funding_satoshis, msg.funding_satoshis)));
	if msg.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
		return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.funding_satoshis)));
	if msg.channel_reserve_satoshis > msg.funding_satoshis {
		return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must be not greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.funding_satoshis)));
	// The value actually spendable on the channel, after the reserve the peer demands of us.
	let full_channel_value_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000;
	if msg.push_msat > full_channel_value_msat {
		return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg.push_msat, full_channel_value_msat)));
	if msg.dust_limit_satoshis > msg.funding_satoshis {
		return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than funding_satoshis {}. Peer never wants payout outputs?", msg.dust_limit_satoshis, msg.funding_satoshis)));
	if msg.htlc_minimum_msat >= full_channel_value_msat {
		return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
	// Reject feerates we consider unreasonable for this channel type.
	Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, msg.feerate_per_kw, None, &&logger)?;

	let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
	if msg.to_self_delay > max_counterparty_selected_contest_delay {
		return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, msg.to_self_delay)));
	if msg.max_accepted_htlcs < 1 {
		return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
	if msg.max_accepted_htlcs > MAX_HTLCS {
		return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));

	// Now check against optional parameters as set by config...
	if msg.funding_satoshis < config.channel_handshake_limits.min_funding_satoshis {
		return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", msg.funding_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
	if msg.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
		return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
	if msg.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
		return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
	if msg.channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
		return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
	if msg.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
		return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
	if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
		return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
	if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
		return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));

	// Convert things into internal flags and prep our state:

	if config.channel_handshake_limits.force_announced_channel_preference {
		if config.channel_handshake_config.announced_channel != announced_channel {
			return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));

	let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis, config);
	if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
		// Protocol level safety check in place, although it should never happen because
		// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
		return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
	if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
		return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg.push_msat)));
	if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
		log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
			msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
	if holder_selected_channel_reserve_satoshis < msg.dust_limit_satoshis {
		return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));

	// check if the funder's amount for the initial commitment tx is sufficient
	// for full fee payment plus a few HTLCs to ensure the channel will be useful.
	let anchor_outputs_value = if channel_type.supports_anchors_zero_fee_htlc_tx() {
		ANCHOR_OUTPUT_VALUE_SATOSHI * 2

	let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat;
	let commitment_tx_fee = commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000;
	if (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value) < commitment_tx_fee {
		return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee)));

	let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee - anchor_outputs_value;
	// While it's reasonable for us to not meet the channel reserve initially (if they don't
	// want to push much to us), our counterparty should always have more than our reserve.
	if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
		return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));

	// Validate the peer's (optional) upfront shutdown script, if they signal support for it.
	let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
		match &msg.shutdown_scriptpubkey {
			&Some(ref script) => {
				// Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything
				if script.len() == 0 {
				if !script::is_bolt2_compliant(&script, their_features) {
					return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
				Some(script.clone())
			// Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
				return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));

	// Optionally commit to our own upfront shutdown script, per user config.
	let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
		match signer_provider.get_shutdown_scriptpubkey() {
			Ok(scriptpubkey) => Some(scriptpubkey),
			Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
	if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
		if !shutdown_scriptpubkey.is_compatible(&their_features) {
			return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));

	let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
		Ok(script) => script,
		Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),

	// Randomize the secp context to harden against side-channel attacks.
	let mut secp_ctx = Secp256k1::new();
	secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());

	// 0conf channels take no confirmation requirement; otherwise use config (min 1 conf).
	let minimum_depth = if is_0conf {
		Some(cmp::max(config.channel_handshake_config.minimum_depth, 1))

	// Assemble the initial channel state. All negotiated and config-derived values computed
	// above are recorded here; everything else starts at its pre-funding default.
		context: ChannelContext {

			config: LegacyChannelConfig {
				options: config.channel_config.clone(),
				commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,

			inbound_handshake_limits_override: None,

			// Until funding is created, the temporary id doubles as the channel id.
			temporary_channel_id: Some(msg.temporary_channel_id),
			channel_id: msg.temporary_channel_id,
			channel_state: ChannelState::NegotiatingFunding(
				NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
			announcement_sigs_state: AnnouncementSigsState::NotSent,

			latest_monitor_update_id: 0,

			holder_signer: ChannelSignerType::Ecdsa(holder_signer),
			shutdown_scriptpubkey,

			cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
			cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
			// The peer's push_msat is value that starts on our side of the channel.
			value_to_self_msat: msg.push_msat,

			pending_inbound_htlcs: Vec::new(),
			pending_outbound_htlcs: Vec::new(),
			holding_cell_htlc_updates: Vec::new(),
			pending_update_fee: None,
			holding_cell_update_fee: None,
			next_holder_htlc_id: 0,
			next_counterparty_htlc_id: 0,
			update_time_counter: 1,

			resend_order: RAACommitmentOrder::CommitmentFirst,

			monitor_pending_channel_ready: false,
			monitor_pending_revoke_and_ack: false,
			monitor_pending_commitment_signed: false,
			monitor_pending_forwards: Vec::new(),
			monitor_pending_failures: Vec::new(),
			monitor_pending_finalized_fulfills: Vec::new(),

			signer_pending_commitment_update: false,
			signer_pending_funding: false,

			#[cfg(debug_assertions)]
			holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
			#[cfg(debug_assertions)]
			counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),

			last_sent_closing_fee: None,
			pending_counterparty_closing_signed: None,
			expecting_peer_commitment_signed: false,
			closing_fee_limits: None,
			target_closing_feerate_sats_per_kw: None,

			funding_tx_confirmed_in: None,
			funding_tx_confirmation_height: 0,
			short_channel_id: None,
			channel_creation_height: current_chain_height,

			feerate_per_kw: msg.feerate_per_kw,
			channel_value_satoshis: msg.funding_satoshis,
			counterparty_dust_limit_satoshis: msg.dust_limit_satoshis,
			holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
			// Cap the peer's claimed in-flight limit at the channel's total value.
			counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000),
			holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config),
			counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis),
			holder_selected_channel_reserve_satoshis,
			counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
			// A zero configured htlc_minimum is bumped to 1 msat.
			holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
			counterparty_max_accepted_htlcs: msg.max_accepted_htlcs,
			holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),

			counterparty_forwarding_info: None,

			channel_transaction_parameters: ChannelTransactionParameters {
				holder_pubkeys: pubkeys,
				holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
				is_outbound_from_holder: false,
				counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
					selected_contest_delay: msg.to_self_delay,
					pubkeys: counterparty_pubkeys,
				funding_outpoint: None,
				channel_type_features: channel_type.clone()
			funding_transaction: None,
			is_batch_funding: None,

			counterparty_cur_commitment_point: Some(msg.first_per_commitment_point),
			counterparty_prev_commitment_point: None,
			counterparty_node_id,

			counterparty_shutdown_scriptpubkey,

			commitment_secrets: CounterpartyCommitmentSecrets::new(),

			channel_update_status: ChannelUpdateStatus::Enabled,
			closing_signed_in_flight: false,

			announcement_sigs: None,

			#[cfg(any(test, fuzzing))]
			next_local_commitment_tx_fee_info_cached: Mutex::new(None),
			#[cfg(any(test, fuzzing))]
			next_remote_commitment_tx_fee_info_cached: Mutex::new(None),

			workaround_lnd_bug_4006: None,
			sent_message_awaiting_response: None,

			latest_inbound_scid_alias: None,
			outbound_scid_alias: 0,

			channel_pending_event_emitted: false,
			channel_ready_event_emitted: false,

			#[cfg(any(test, fuzzing))]
			historical_inbound_htlc_fulfills: HashSet::new(),

			blocked_monitor_updates: Vec::new(),
		unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
/// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
/// should be sent back to the counterparty node.
///
/// Panics if called on an outbound channel or after the channel has moved past the initial
/// funding-negotiation state.
///
/// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
pub fn accept_inbound_channel(&mut self) -> msgs::AcceptChannel {
	if self.context.is_outbound() {
		panic!("Tried to send accept_channel for an outbound channel?");
	// Only valid while still negotiating funding with both sides' init exchanged.
	self.context.channel_state, ChannelState::NegotiatingFunding(flags)
	if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
		panic!("Tried to send accept_channel after channel had moved forward");
	if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
		panic!("Tried to send an accept_channel for a channel that has already advanced");

	self.generate_accept_channel_message()
/// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
/// inbound channel. If the intention is to accept an inbound channel, use
/// [`InboundV1Channel::accept_inbound_channel`] instead.
///
/// Builds the message purely from our stored channel state and keys; it performs no state
/// checks or transitions itself.
///
/// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
	let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
	let keys = self.context.get_holder_pubkeys();

	msgs::AcceptChannel {
		temporary_channel_id: self.context.channel_id,
		dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
		max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
		channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
		htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
		minimum_depth: self.context.minimum_depth.unwrap(),
		to_self_delay: self.context.get_holder_selected_contest_delay(),
		max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
		funding_pubkey: keys.funding_pubkey,
		revocation_basepoint: keys.revocation_basepoint.to_public_key(),
		payment_point: keys.payment_point,
		delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
		htlc_basepoint: keys.htlc_basepoint.to_public_key(),
		first_per_commitment_point,
		// An empty script signals we are not committing to an upfront shutdown script.
		shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
			Some(script) => script.clone().into_inner(),
			None => Builder::new().into_script(),
		channel_type: Some(self.context.channel_type.clone()),
		next_local_nonce: None,
/// Enables the possibility for tests to extract a [`msgs::AcceptChannel`] message for an
/// inbound channel without accepting it.
///
/// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel {
	// Thin test-only wrapper: build the message without any state checks or transitions.
	self.generate_accept_channel_message()
/// Verifies the counterparty's `funding_created` signature over our initial holder commitment
/// transaction, returning that commitment transaction on success or a `ChannelError` (via
/// `secp_check!`) if the signature is invalid.
fn check_funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<CommitmentTransaction, ChannelError> where L::Target: Logger {
	let funding_script = self.context.get_funding_redeemscript();

	// Build our initial commitment transaction and compute its sighash over the funding output.
	let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
	let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
	let trusted_tx = initial_commitment_tx.trust();
	let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
	let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
	// They sign the holder commitment transaction...
	log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
		log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
		encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
		encode::serialize_hex(&funding_script), &self.context.channel_id());
	secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());

	Ok(initial_commitment_tx)
/// Handles a `funding_created` message from the remote end of an inbound channel.
///
/// Verifies the peer's signature over our initial commitment transaction, records the funding
/// outpoint, builds the `ChannelMonitor`, and promotes `self` to a full [`Channel`]. The
/// returned `funding_signed` is `None` if our signer has not yet produced its signature. On
/// failure, `self` is handed back together with the error.
pub fn funding_created<L: Deref>(
	mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
) -> Result<(Channel<SP>, Option<msgs::FundingSigned>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (Self, ChannelError)>
	// funding_created is only ever valid on a channel the peer initiated.
	if self.context.is_outbound() {
		return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned())));
	// Only valid while still negotiating funding with both sides' init exchanged.
	self.context.channel_state, ChannelState::NegotiatingFunding(flags)
	if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
		// BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
		// remember the channel, so it's safe to just send an error_message here and drop the
		return Err((self, ChannelError::Close("Received funding_created after we got the channel!".to_owned())));
	// Sanity: no revocation secrets can have been seen and both commitment numbers must still
	// be at their initial values.
	if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
		self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
		self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
		panic!("Should not have advanced channel commitment tx numbers prior to funding_created");

	let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
	self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
	// This is an externally observable change before we finish all our checks. In particular
	// check_funding_created_signature may fail.
	self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);

	let initial_commitment_tx = match self.check_funding_created_signature(&msg.signature, logger) {
		Err(ChannelError::Close(e)) => {
			// Roll back the externally observable funding outpoint set above.
			self.context.channel_transaction_parameters.funding_outpoint = None;
			return Err((self, ChannelError::Close(e)));
			// The only error we know how to handle is ChannelError::Close, so we fall over here
			// to make sure we don't continue with an inconsistent state.
			panic!("unexpected error type from check_funding_created_signature {:?}", e);

	let holder_commitment_tx = HolderCommitmentTransaction::new(
		initial_commitment_tx,
		&self.context.get_holder_pubkeys().funding_pubkey,
		self.context.counterparty_funding_pubkey()

	// Give the signer a chance to reject the commitment before we commit to it.
	if let Err(_) = self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new()) {
		return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));

	// Now that we're past error-generating stuff, update our local state:

	self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
	// The real channel id is derived from the funding outpoint, replacing the temporary one.
	self.context.channel_id = funding_txo.to_channel_id();
	self.context.cur_counterparty_commitment_transaction_number -= 1;
	self.context.cur_holder_commitment_transaction_number -= 1;

	// funding_signed may be None if the signer is still pending; see the log line below.
	let (counterparty_initial_commitment_tx, funding_signed) = self.context.get_funding_signed_msg(logger);

	let funding_redeemscript = self.context.get_funding_redeemscript();
	let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
	let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
	let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
	// A dedicated signer instance is derived for the monitor.
	let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
	monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
	let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
		shutdown_script, self.context.get_holder_selected_contest_delay(),
		&self.context.destination_script, (funding_txo, funding_txo_script.clone()),
		&self.context.channel_transaction_parameters,
		funding_redeemscript.clone(), self.context.channel_value_satoshis,
		holder_commitment_tx, best_block, self.context.counterparty_node_id);
	// Seed the monitor with the counterparty's initial commitment transaction. Note the
	// counterparty commitment number was decremented above, hence the `+ 1` here.
	channel_monitor.provide_initial_counterparty_commitment_tx(
		counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
		self.context.cur_counterparty_commitment_transaction_number + 1,
		self.context.counterparty_cur_commitment_point.unwrap(), self.context.feerate_per_kw,
		counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
		counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);

	log_info!(logger, "{} funding_signed for peer for channel {}",
		if funding_signed.is_some() { "Generated" } else { "Waiting for signature on" }, &self.context.channel_id());

	// Promote the channel to a full-fledged one now that we have updated the state and have a
	// `ChannelMonitor`.
	let mut channel = Channel {
		context: self.context,
	let need_channel_ready = channel.check_get_channel_ready(0).is_some();
	channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());

	Ok((channel, funding_signed, channel_monitor))
// Current version of the non-TLV portion of the Channel serialization format.
7396 const SERIALIZATION_VERSION: u8 = 3;
// Oldest format version this code will still accept when reading; kept equal to
// SERIALIZATION_VERSION, so readers only need to handle the current layout.
7397 const MIN_SERIALIZATION_VERSION: u8 = 3;
// NOTE(review): this macro invocation appears truncated in this view — the
// variant list and closing delimiter are not visible here; confirm against the
// full file before relying on it.
7399 impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
// Serializes a ChannelUpdateStatus as a single byte: 0 for last-announced-enabled,
// 1 for last-announced-disabled. The *Staged variants are transient and collapse to
// the state that was most recently announced to the network.
7405 impl Writeable for ChannelUpdateStatus {
7406 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7407 // We only care about writing out the current state as it was announced, ie only either
7408 // Enabled or Disabled. In the case of DisabledStaged, we most recently announced the
7409 // channel as enabled, so we write 0. For EnabledStaged, we similarly write a 1.
7411 ChannelUpdateStatus::Enabled => 0u8.write(writer)?,
7412 ChannelUpdateStatus::DisabledStaged(_) => 0u8.write(writer)?,
7413 ChannelUpdateStatus::EnabledStaged(_) => 1u8.write(writer)?,
7414 ChannelUpdateStatus::Disabled => 1u8.write(writer)?,
// Inverse of the Writeable impl above: only the two announced states are ever
// written (as 0 or 1), so any other byte is a corrupt/invalid encoding.
7420 impl Readable for ChannelUpdateStatus {
7421 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
7422 Ok(match <u8 as Readable>::read(reader)? {
7423 0 => ChannelUpdateStatus::Enabled,
7424 1 => ChannelUpdateStatus::Disabled,
7425 _ => return Err(DecodeError::InvalidValue),
// Serializes an AnnouncementSigsState as a single byte. Serialization models a
// fresh disconnect, so every state except PeerReceived is written as NotSent (0);
// PeerReceived (1) survives reconnection and must be preserved.
7430 impl Writeable for AnnouncementSigsState {
7431 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7432 // We only care about writing out the current state as if we had just disconnected, at
7433 // which point we always set anything but AnnouncementSigsReceived to NotSent.
7435 AnnouncementSigsState::NotSent => 0u8.write(writer),
7436 AnnouncementSigsState::MessageSent => 0u8.write(writer),
7437 AnnouncementSigsState::Committed => 0u8.write(writer),
7438 AnnouncementSigsState::PeerReceived => 1u8.write(writer),
// Inverse of the Writeable impl above: only NotSent (0) and PeerReceived (1) are
// ever serialized; any other byte is rejected as invalid.
7443 impl Readable for AnnouncementSigsState {
7444 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
7445 Ok(match <u8 as Readable>::read(reader)? {
7446 0 => AnnouncementSigsState::NotSent,
7447 1 => AnnouncementSigsState::PeerReceived,
7448 _ => return Err(DecodeError::InvalidValue),
// Serializes the full channel state. Layout: a version prefix, then a fixed-order
// legacy (non-TLV) section kept byte-compatible with pre-0.0.99 readers, then a
// TLV stream for everything added since. The field order of the legacy section
// must never change; new fields go in the TLV stream at the end.
7453 impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
7454 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7455 // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been
7458 write_ver_prefix!(writer, MIN_SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
7460 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7461 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write
7462 // the low bytes now and the optional high bytes later.
7463 let user_id_low = self.context.user_id as u64;
7464 user_id_low.write(writer)?;
7466 // Version 1 deserializers expected to read parts of the config object here. Version 2
7467 // deserializers (0.0.99) now read config through TLVs, and as we now require them for
7468 // `minimum_depth` we simply write dummy values here.
7469 writer.write_all(&[0; 8])?;
7471 self.context.channel_id.write(writer)?;
// For channels past funding, serialize the state with the peer-disconnected flag
// set, since on reload we will always be freshly "disconnected" from the peer.
7473 let mut channel_state = self.context.channel_state;
7474 if matches!(channel_state, ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)) {
7475 channel_state.set_peer_disconnected();
7477 channel_state.to_u32().write(writer)?;
7479 self.context.channel_value_satoshis.write(writer)?;
7481 self.context.latest_monitor_update_id.write(writer)?;
7483 // Write out the old serialization for shutdown_pubkey for backwards compatibility, if
7484 // deserialized from that format.
7485 match self.context.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) {
7486 Some(shutdown_pubkey) => shutdown_pubkey.write(writer)?,
// All-zero placeholder keeps the legacy fixed-width slot when there is no legacy pubkey.
7487 None => [0u8; PUBLIC_KEY_SIZE].write(writer)?,
7489 self.context.destination_script.write(writer)?;
7491 self.context.cur_holder_commitment_transaction_number.write(writer)?;
7492 self.context.cur_counterparty_commitment_transaction_number.write(writer)?;
7493 self.context.value_to_self_msat.write(writer)?;
// Inbound HTLCs still in RemoteAnnounced were never committed by us; they are
// dropped on serialization (the peer will re-add them on reconnect), so count
// them first to write an accurate HTLC count.
7495 let mut dropped_inbound_htlcs = 0;
7496 for htlc in self.context.pending_inbound_htlcs.iter() {
7497 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
7498 dropped_inbound_htlcs += 1;
7501 (self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?;
7502 for htlc in self.context.pending_inbound_htlcs.iter() {
7503 if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state {
7506 htlc.htlc_id.write(writer)?;
7507 htlc.amount_msat.write(writer)?;
7508 htlc.cltv_expiry.write(writer)?;
7509 htlc.payment_hash.write(writer)?;
// RemoteAnnounced entries were filtered out above, so hitting one here is a bug.
7511 &InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
7512 &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_state) => {
7514 htlc_state.write(writer)?;
7516 &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_state) => {
7518 htlc_state.write(writer)?;
7520 &InboundHTLCState::Committed => {
7523 &InboundHTLCState::LocalRemoved(ref removal_reason) => {
7525 removal_reason.write(writer)?;
// Preimages and the skimmed-fee/blinding-point columns are collected while
// walking the outbound HTLCs and written later as TLVs (types 15, 35, 39) to
// preserve the legacy per-HTLC layout.
7530 let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];
7531 let mut pending_outbound_skimmed_fees: Vec<Option<u64>> = Vec::new();
7532 let mut pending_outbound_blinding_points: Vec<Option<PublicKey>> = Vec::new();
7534 (self.context.pending_outbound_htlcs.len() as u64).write(writer)?;
7535 for htlc in self.context.pending_outbound_htlcs.iter() {
7536 htlc.htlc_id.write(writer)?;
7537 htlc.amount_msat.write(writer)?;
7538 htlc.cltv_expiry.write(writer)?;
7539 htlc.payment_hash.write(writer)?;
7540 htlc.source.write(writer)?;
7542 &OutboundHTLCState::LocalAnnounced(ref onion_packet) => {
7544 onion_packet.write(writer)?;
7546 &OutboundHTLCState::Committed => {
7549 &OutboundHTLCState::RemoteRemoved(_) => {
7550 // Treat this as a Committed because we haven't received the CS - they'll
7551 // resend the claim/fail on reconnect as well as (hopefully) the missing CS.
7554 &OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref outcome) => {
7556 if let OutboundHTLCOutcome::Success(preimage) = outcome {
7557 preimages.push(preimage);
7559 let reason: Option<&HTLCFailReason> = outcome.into();
7560 reason.write(writer)?;
7562 &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) => {
7564 if let OutboundHTLCOutcome::Success(preimage) = outcome {
7565 preimages.push(preimage);
7567 let reason: Option<&HTLCFailReason> = outcome.into();
7568 reason.write(writer)?;
7571 pending_outbound_skimmed_fees.push(htlc.skimmed_fee_msat);
7572 pending_outbound_blinding_points.push(htlc.blinding_point);
// Same pattern for the holding cell: newer per-update data is carried in
// side vectors written as TLVs (types 37, 41, 43) after the legacy section.
7575 let mut holding_cell_skimmed_fees: Vec<Option<u64>> = Vec::new();
7576 let mut holding_cell_blinding_points: Vec<Option<PublicKey>> = Vec::new();
7577 // Vec of (htlc_id, failure_code, sha256_of_onion)
7578 let mut malformed_htlcs: Vec<(u64, u16, [u8; 32])> = Vec::new();
7579 (self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
7580 for update in self.context.holding_cell_htlc_updates.iter() {
7582 &HTLCUpdateAwaitingACK::AddHTLC {
7583 ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
7584 blinding_point, skimmed_fee_msat,
7587 amount_msat.write(writer)?;
7588 cltv_expiry.write(writer)?;
7589 payment_hash.write(writer)?;
7590 source.write(writer)?;
7591 onion_routing_packet.write(writer)?;
7593 holding_cell_skimmed_fees.push(skimmed_fee_msat);
7594 holding_cell_blinding_points.push(blinding_point);
7596 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
7598 payment_preimage.write(writer)?;
7599 htlc_id.write(writer)?;
7601 &HTLCUpdateAwaitingACK::FailHTLC { ref htlc_id, ref err_packet } => {
7603 htlc_id.write(writer)?;
7604 err_packet.write(writer)?;
7606 &HTLCUpdateAwaitingACK::FailMalformedHTLC {
7607 htlc_id, failure_code, sha256_of_onion
7609 // We don't want to break downgrading by adding a new variant, so write a dummy
7610 // `::FailHTLC` variant and write the real malformed error as an optional TLV.
7611 malformed_htlcs.push((htlc_id, failure_code, sha256_of_onion));
7613 let dummy_err_packet = msgs::OnionErrorPacket { data: Vec::new() };
7615 htlc_id.write(writer)?;
7616 dummy_err_packet.write(writer)?;
7621 match self.context.resend_order {
7622 RAACommitmentOrder::CommitmentFirst => 0u8.write(writer)?,
7623 RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?,
7626 self.context.monitor_pending_channel_ready.write(writer)?;
7627 self.context.monitor_pending_revoke_and_ack.write(writer)?;
7628 self.context.monitor_pending_commitment_signed.write(writer)?;
7630 (self.context.monitor_pending_forwards.len() as u64).write(writer)?;
7631 for &(ref pending_forward, ref htlc_id) in self.context.monitor_pending_forwards.iter() {
7632 pending_forward.write(writer)?;
7633 htlc_id.write(writer)?;
7636 (self.context.monitor_pending_failures.len() as u64).write(writer)?;
7637 for &(ref htlc_source, ref payment_hash, ref fail_reason) in self.context.monitor_pending_failures.iter() {
7638 htlc_source.write(writer)?;
7639 payment_hash.write(writer)?;
7640 fail_reason.write(writer)?;
// Pending fee updates: as the funder we always persist ours; as the fundee we
// only persist an update that has already been committed in a commitment_signed.
7643 if self.context.is_outbound() {
7644 self.context.pending_update_fee.map(|(a, _)| a).write(writer)?;
7645 } else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.context.pending_update_fee {
7646 Some(feerate).write(writer)?;
7648 // As for inbound HTLCs, if the update was only announced and never committed in a
7649 // commitment_signed, drop it.
7650 None::<u32>.write(writer)?;
7652 self.context.holding_cell_update_fee.write(writer)?;
7654 self.context.next_holder_htlc_id.write(writer)?;
// Subtract the HTLCs dropped above so the id counter matches the HTLCs actually written.
7655 (self.context.next_counterparty_htlc_id - dropped_inbound_htlcs).write(writer)?;
7656 self.context.update_time_counter.write(writer)?;
7657 self.context.feerate_per_kw.write(writer)?;
7659 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7660 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7661 // `last_send_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7662 // consider the stale state on reload.
7665 self.context.funding_tx_confirmed_in.write(writer)?;
7666 self.context.funding_tx_confirmation_height.write(writer)?;
7667 self.context.short_channel_id.write(writer)?;
7669 self.context.counterparty_dust_limit_satoshis.write(writer)?;
7670 self.context.holder_dust_limit_satoshis.write(writer)?;
7671 self.context.counterparty_max_htlc_value_in_flight_msat.write(writer)?;
7673 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7674 self.context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0).write(writer)?;
7676 self.context.counterparty_htlc_minimum_msat.write(writer)?;
7677 self.context.holder_htlc_minimum_msat.write(writer)?;
7678 self.context.counterparty_max_accepted_htlcs.write(writer)?;
7680 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7681 self.context.minimum_depth.unwrap_or(0).write(writer)?;
7683 match &self.context.counterparty_forwarding_info {
7686 info.fee_base_msat.write(writer)?;
7687 info.fee_proportional_millionths.write(writer)?;
7688 info.cltv_expiry_delta.write(writer)?;
// A leading 0 byte marks "no forwarding info present" for the reader.
7690 None => 0u8.write(writer)?
7693 self.context.channel_transaction_parameters.write(writer)?;
7694 self.context.funding_transaction.write(writer)?;
7696 self.context.counterparty_cur_commitment_point.write(writer)?;
7697 self.context.counterparty_prev_commitment_point.write(writer)?;
7698 self.context.counterparty_node_id.write(writer)?;
7700 self.context.counterparty_shutdown_scriptpubkey.write(writer)?;
7702 self.context.commitment_secrets.write(writer)?;
7704 self.context.channel_update_status.write(writer)?;
// Test/fuzz-only debug state; compiled out of release builds entirely.
7706 #[cfg(any(test, fuzzing))]
7707 (self.context.historical_inbound_htlc_fulfills.len() as u64).write(writer)?;
7708 #[cfg(any(test, fuzzing))]
7709 for htlc in self.context.historical_inbound_htlc_fulfills.iter() {
7710 htlc.write(writer)?;
7713 // If the channel type is something other than only-static-remote-key, then we need to have
7714 // older clients fail to deserialize this channel at all. If the type is
7715 // only-static-remote-key, we simply consider it "default" and don't write the channel type
7717 let chan_type = if self.context.channel_type != ChannelTypeFeatures::only_static_remote_key() {
7718 Some(&self.context.channel_type) } else { None };
7720 // The same logic applies for `holder_selected_channel_reserve_satoshis` values other than
7721 // the default, and when `holder_max_htlc_value_in_flight_msat` is configured to be set to
7722 // a different percentage of the channel value than 10%, which older versions of LDK used
7723 // to set it to before the percentage was made configurable.
7724 let serialized_holder_selected_reserve =
7725 if self.context.holder_selected_channel_reserve_satoshis != get_legacy_default_holder_selected_channel_reserve_satoshis(self.context.channel_value_satoshis)
7726 { Some(self.context.holder_selected_channel_reserve_satoshis) } else { None };
7728 let mut old_max_in_flight_percent_config = UserConfig::default().channel_handshake_config;
7729 old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = MAX_IN_FLIGHT_PERCENT_LEGACY;
7730 let serialized_holder_htlc_max_in_flight =
7731 if self.context.holder_max_htlc_value_in_flight_msat != get_holder_max_htlc_value_in_flight_msat(self.context.channel_value_satoshis, &old_max_in_flight_percent_config)
7732 { Some(self.context.holder_max_htlc_value_in_flight_msat) } else { None };
7734 let channel_pending_event_emitted = Some(self.context.channel_pending_event_emitted);
7735 let channel_ready_event_emitted = Some(self.context.channel_ready_event_emitted);
7737 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7738 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. Therefore,
7739 // we write the high bytes as an option here.
7740 let user_id_high_opt = Some((self.context.user_id >> 64) as u64);
7742 let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.context.holder_max_accepted_htlcs) };
// TLV section. Even types are mandatory for readers (unknown even types make old
// versions refuse to load the channel); odd types are optional and ignored by
// readers that do not understand them. Types must only ever be appended.
7744 write_tlv_fields!(writer, {
7745 (0, self.context.announcement_sigs, option),
7746 // minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
7747 // default value instead of being Option<>al. Thus, to maintain compatibility we write
7748 // them twice, once with their original default values above, and once as an option
7749 // here. On the read side, old versions will simply ignore the odd-type entries here,
7750 // and new versions map the default values to None and allow the TLV entries here to
7752 (1, self.context.minimum_depth, option),
7753 (2, chan_type, option),
7754 (3, self.context.counterparty_selected_channel_reserve_satoshis, option),
7755 (4, serialized_holder_selected_reserve, option),
7756 (5, self.context.config, required),
7757 (6, serialized_holder_htlc_max_in_flight, option),
7758 (7, self.context.shutdown_scriptpubkey, option),
7759 (8, self.context.blocked_monitor_updates, optional_vec),
7760 (9, self.context.target_closing_feerate_sats_per_kw, option),
7761 (11, self.context.monitor_pending_finalized_fulfills, required_vec),
7762 (13, self.context.channel_creation_height, required),
7763 (15, preimages, required_vec),
7764 (17, self.context.announcement_sigs_state, required),
7765 (19, self.context.latest_inbound_scid_alias, option),
7766 (21, self.context.outbound_scid_alias, required),
7767 (23, channel_ready_event_emitted, option),
7768 (25, user_id_high_opt, option),
7769 (27, self.context.channel_keys_id, required),
7770 (28, holder_max_accepted_htlcs, option),
7771 (29, self.context.temporary_channel_id, option),
7772 (31, channel_pending_event_emitted, option),
7773 (35, pending_outbound_skimmed_fees, optional_vec),
7774 (37, holding_cell_skimmed_fees, optional_vec),
7775 (38, self.context.is_batch_funding, option),
7776 (39, pending_outbound_blinding_points, optional_vec),
7777 (41, holding_cell_blinding_points, optional_vec),
7778 (43, malformed_htlcs, optional_vec), // Added in 0.0.119
7785 const MAX_ALLOC_SIZE: usize = 64*1024;
7786 impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel<SP>
7788 ES::Target: EntropySource,
7789 SP::Target: SignerProvider
7791 fn read<R : io::Read>(reader: &mut R, args: (&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)) -> Result<Self, DecodeError> {
7792 let (entropy_source, signer_provider, serialized_height, our_supported_features) = args;
7793 let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
7795 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7796 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We read
7797 // the low bytes now and the high bytes later.
7798 let user_id_low: u64 = Readable::read(reader)?;
7800 let mut config = Some(LegacyChannelConfig::default());
7802 // Read the old serialization of the ChannelConfig from version 0.0.98.
7803 config.as_mut().unwrap().options.forwarding_fee_proportional_millionths = Readable::read(reader)?;
7804 config.as_mut().unwrap().options.cltv_expiry_delta = Readable::read(reader)?;
7805 config.as_mut().unwrap().announced_channel = Readable::read(reader)?;
7806 config.as_mut().unwrap().commit_upfront_shutdown_pubkey = Readable::read(reader)?;
7808 // Read the 8 bytes of backwards-compatibility ChannelConfig data.
7809 let mut _val: u64 = Readable::read(reader)?;
7812 let channel_id = Readable::read(reader)?;
7813 let channel_state = ChannelState::from_u32(Readable::read(reader)?).map_err(|_| DecodeError::InvalidValue)?;
7814 let channel_value_satoshis = Readable::read(reader)?;
7816 let latest_monitor_update_id = Readable::read(reader)?;
7818 let mut keys_data = None;
7820 // Read the serialize signer bytes. We'll choose to deserialize them or not based on whether
7821 // the `channel_keys_id` TLV is present below.
7822 let keys_len: u32 = Readable::read(reader)?;
7823 keys_data = Some(Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE)));
7824 while keys_data.as_ref().unwrap().len() != keys_len as usize {
7825 // Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
7826 let mut data = [0; 1024];
7827 let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.as_ref().unwrap().len())];
7828 reader.read_exact(read_slice)?;
7829 keys_data.as_mut().unwrap().extend_from_slice(read_slice);
7833 // Read the old serialization for shutdown_pubkey, preferring the TLV field later if set.
7834 let mut shutdown_scriptpubkey = match <PublicKey as Readable>::read(reader) {
7835 Ok(pubkey) => Some(ShutdownScript::new_p2wpkh_from_pubkey(pubkey)),
7838 let destination_script = Readable::read(reader)?;
7840 let cur_holder_commitment_transaction_number = Readable::read(reader)?;
7841 let cur_counterparty_commitment_transaction_number = Readable::read(reader)?;
7842 let value_to_self_msat = Readable::read(reader)?;
7844 let pending_inbound_htlc_count: u64 = Readable::read(reader)?;
7846 let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7847 for _ in 0..pending_inbound_htlc_count {
7848 pending_inbound_htlcs.push(InboundHTLCOutput {
7849 htlc_id: Readable::read(reader)?,
7850 amount_msat: Readable::read(reader)?,
7851 cltv_expiry: Readable::read(reader)?,
7852 payment_hash: Readable::read(reader)?,
7853 state: match <u8 as Readable>::read(reader)? {
7854 1 => InboundHTLCState::AwaitingRemoteRevokeToAnnounce(Readable::read(reader)?),
7855 2 => InboundHTLCState::AwaitingAnnouncedRemoteRevoke(Readable::read(reader)?),
7856 3 => InboundHTLCState::Committed,
7857 4 => InboundHTLCState::LocalRemoved(Readable::read(reader)?),
7858 _ => return Err(DecodeError::InvalidValue),
7863 let pending_outbound_htlc_count: u64 = Readable::read(reader)?;
7864 let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7865 for _ in 0..pending_outbound_htlc_count {
7866 pending_outbound_htlcs.push(OutboundHTLCOutput {
7867 htlc_id: Readable::read(reader)?,
7868 amount_msat: Readable::read(reader)?,
7869 cltv_expiry: Readable::read(reader)?,
7870 payment_hash: Readable::read(reader)?,
7871 source: Readable::read(reader)?,
7872 state: match <u8 as Readable>::read(reader)? {
7873 0 => OutboundHTLCState::LocalAnnounced(Box::new(Readable::read(reader)?)),
7874 1 => OutboundHTLCState::Committed,
7876 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7877 OutboundHTLCState::RemoteRemoved(option.into())
7880 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7881 OutboundHTLCState::AwaitingRemoteRevokeToRemove(option.into())
7884 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7885 OutboundHTLCState::AwaitingRemovedRemoteRevoke(option.into())
7887 _ => return Err(DecodeError::InvalidValue),
7889 skimmed_fee_msat: None,
7890 blinding_point: None,
7894 let holding_cell_htlc_update_count: u64 = Readable::read(reader)?;
7895 let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, DEFAULT_MAX_HTLCS as usize*2));
7896 for _ in 0..holding_cell_htlc_update_count {
7897 holding_cell_htlc_updates.push(match <u8 as Readable>::read(reader)? {
7898 0 => HTLCUpdateAwaitingACK::AddHTLC {
7899 amount_msat: Readable::read(reader)?,
7900 cltv_expiry: Readable::read(reader)?,
7901 payment_hash: Readable::read(reader)?,
7902 source: Readable::read(reader)?,
7903 onion_routing_packet: Readable::read(reader)?,
7904 skimmed_fee_msat: None,
7905 blinding_point: None,
7907 1 => HTLCUpdateAwaitingACK::ClaimHTLC {
7908 payment_preimage: Readable::read(reader)?,
7909 htlc_id: Readable::read(reader)?,
7911 2 => HTLCUpdateAwaitingACK::FailHTLC {
7912 htlc_id: Readable::read(reader)?,
7913 err_packet: Readable::read(reader)?,
7915 _ => return Err(DecodeError::InvalidValue),
7919 let resend_order = match <u8 as Readable>::read(reader)? {
7920 0 => RAACommitmentOrder::CommitmentFirst,
7921 1 => RAACommitmentOrder::RevokeAndACKFirst,
7922 _ => return Err(DecodeError::InvalidValue),
7925 let monitor_pending_channel_ready = Readable::read(reader)?;
7926 let monitor_pending_revoke_and_ack = Readable::read(reader)?;
7927 let monitor_pending_commitment_signed = Readable::read(reader)?;
7929 let monitor_pending_forwards_count: u64 = Readable::read(reader)?;
7930 let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, DEFAULT_MAX_HTLCS as usize));
7931 for _ in 0..monitor_pending_forwards_count {
7932 monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?));
7935 let monitor_pending_failures_count: u64 = Readable::read(reader)?;
7936 let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, DEFAULT_MAX_HTLCS as usize));
7937 for _ in 0..monitor_pending_failures_count {
7938 monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?));
7941 let pending_update_fee_value: Option<u32> = Readable::read(reader)?;
7943 let holding_cell_update_fee = Readable::read(reader)?;
7945 let next_holder_htlc_id = Readable::read(reader)?;
7946 let next_counterparty_htlc_id = Readable::read(reader)?;
7947 let update_time_counter = Readable::read(reader)?;
7948 let feerate_per_kw = Readable::read(reader)?;
7950 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7951 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7952 // `last_send_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7953 // consider the stale state on reload.
7954 match <u8 as Readable>::read(reader)? {
7957 let _: u32 = Readable::read(reader)?;
7958 let _: u64 = Readable::read(reader)?;
7959 let _: Signature = Readable::read(reader)?;
7961 _ => return Err(DecodeError::InvalidValue),
7964 let funding_tx_confirmed_in = Readable::read(reader)?;
7965 let funding_tx_confirmation_height = Readable::read(reader)?;
7966 let short_channel_id = Readable::read(reader)?;
7968 let counterparty_dust_limit_satoshis = Readable::read(reader)?;
7969 let holder_dust_limit_satoshis = Readable::read(reader)?;
7970 let counterparty_max_htlc_value_in_flight_msat = Readable::read(reader)?;
7971 let mut counterparty_selected_channel_reserve_satoshis = None;
7973 // Read the old serialization from version 0.0.98.
7974 counterparty_selected_channel_reserve_satoshis = Some(Readable::read(reader)?);
7976 // Read the 8 bytes of backwards-compatibility data.
7977 let _dummy: u64 = Readable::read(reader)?;
7979 let counterparty_htlc_minimum_msat = Readable::read(reader)?;
7980 let holder_htlc_minimum_msat = Readable::read(reader)?;
7981 let counterparty_max_accepted_htlcs = Readable::read(reader)?;
7983 let mut minimum_depth = None;
7985 // Read the old serialization from version 0.0.98.
7986 minimum_depth = Some(Readable::read(reader)?);
7988 // Read the 4 bytes of backwards-compatibility data.
7989 let _dummy: u32 = Readable::read(reader)?;
7992 let counterparty_forwarding_info = match <u8 as Readable>::read(reader)? {
7994 1 => Some(CounterpartyForwardingInfo {
7995 fee_base_msat: Readable::read(reader)?,
7996 fee_proportional_millionths: Readable::read(reader)?,
7997 cltv_expiry_delta: Readable::read(reader)?,
7999 _ => return Err(DecodeError::InvalidValue),
8002 let mut channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
8003 let funding_transaction: Option<Transaction> = Readable::read(reader)?;
8005 let counterparty_cur_commitment_point = Readable::read(reader)?;
8007 let counterparty_prev_commitment_point = Readable::read(reader)?;
8008 let counterparty_node_id = Readable::read(reader)?;
8010 let counterparty_shutdown_scriptpubkey = Readable::read(reader)?;
8011 let commitment_secrets = Readable::read(reader)?;
8013 let channel_update_status = Readable::read(reader)?;
8015 #[cfg(any(test, fuzzing))]
8016 let mut historical_inbound_htlc_fulfills = HashSet::new();
8017 #[cfg(any(test, fuzzing))]
8019 let htlc_fulfills_len: u64 = Readable::read(reader)?;
8020 for _ in 0..htlc_fulfills_len {
8021 assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?));
8025 let pending_update_fee = if let Some(feerate) = pending_update_fee_value {
8026 Some((feerate, if channel_parameters.is_outbound_from_holder {
8027 FeeUpdateState::Outbound
8029 FeeUpdateState::AwaitingRemoteRevokeToAnnounce
8035 let mut announcement_sigs = None;
8036 let mut target_closing_feerate_sats_per_kw = None;
8037 let mut monitor_pending_finalized_fulfills = Some(Vec::new());
8038 let mut holder_selected_channel_reserve_satoshis = Some(get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis));
8039 let mut holder_max_htlc_value_in_flight_msat = Some(get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().channel_handshake_config));
8040 // Prior to supporting channel type negotiation, all of our channels were static_remotekey
8041 // only, so we default to that if none was written.
8042 let mut channel_type = Some(ChannelTypeFeatures::only_static_remote_key());
8043 let mut channel_creation_height = Some(serialized_height);
8044 let mut preimages_opt: Option<Vec<Option<PaymentPreimage>>> = None;
8046 // If we read an old Channel, for simplicity we just treat it as "we never sent an
8047 // AnnouncementSignatures" which implies we'll re-send it on reconnect, but that's fine.
8048 let mut announcement_sigs_state = Some(AnnouncementSigsState::NotSent);
8049 let mut latest_inbound_scid_alias = None;
8050 let mut outbound_scid_alias = None;
8051 let mut channel_pending_event_emitted = None;
8052 let mut channel_ready_event_emitted = None;
8054 let mut user_id_high_opt: Option<u64> = None;
8055 let mut channel_keys_id: Option<[u8; 32]> = None;
8056 let mut temporary_channel_id: Option<ChannelId> = None;
8057 let mut holder_max_accepted_htlcs: Option<u16> = None;
8059 let mut blocked_monitor_updates = Some(Vec::new());
8061 let mut pending_outbound_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
8062 let mut holding_cell_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
8064 let mut is_batch_funding: Option<()> = None;
8066 let mut pending_outbound_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
8067 let mut holding_cell_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
8069 let mut malformed_htlcs: Option<Vec<(u64, u16, [u8; 32])>> = None;
8071 read_tlv_fields!(reader, {
8072 (0, announcement_sigs, option),
8073 (1, minimum_depth, option),
8074 (2, channel_type, option),
8075 (3, counterparty_selected_channel_reserve_satoshis, option),
8076 (4, holder_selected_channel_reserve_satoshis, option),
8077 (5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
8078 (6, holder_max_htlc_value_in_flight_msat, option),
8079 (7, shutdown_scriptpubkey, option),
8080 (8, blocked_monitor_updates, optional_vec),
8081 (9, target_closing_feerate_sats_per_kw, option),
8082 (11, monitor_pending_finalized_fulfills, optional_vec),
8083 (13, channel_creation_height, option),
8084 (15, preimages_opt, optional_vec),
8085 (17, announcement_sigs_state, option),
8086 (19, latest_inbound_scid_alias, option),
8087 (21, outbound_scid_alias, option),
8088 (23, channel_ready_event_emitted, option),
8089 (25, user_id_high_opt, option),
8090 (27, channel_keys_id, option),
8091 (28, holder_max_accepted_htlcs, option),
8092 (29, temporary_channel_id, option),
8093 (31, channel_pending_event_emitted, option),
8094 (35, pending_outbound_skimmed_fees_opt, optional_vec),
8095 (37, holding_cell_skimmed_fees_opt, optional_vec),
8096 (38, is_batch_funding, option),
8097 (39, pending_outbound_blinding_points_opt, optional_vec),
8098 (41, holding_cell_blinding_points_opt, optional_vec),
8099 (43, malformed_htlcs, optional_vec), // Added in 0.0.119
8102 let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
8103 let mut holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
8104 // If we've gotten to the funding stage of the channel, populate the signer with its
8105 // required channel parameters.
8106 if channel_state >= ChannelState::FundingNegotiated {
8107 holder_signer.provide_channel_parameters(&channel_parameters);
8109 (channel_keys_id, holder_signer)
8111 // `keys_data` can be `None` if we had corrupted data.
8112 let keys_data = keys_data.ok_or(DecodeError::InvalidValue)?;
8113 let holder_signer = signer_provider.read_chan_signer(&keys_data)?;
8114 (holder_signer.channel_keys_id(), holder_signer)
8117 if let Some(preimages) = preimages_opt {
8118 let mut iter = preimages.into_iter();
8119 for htlc in pending_outbound_htlcs.iter_mut() {
8121 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(None)) => {
8122 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
8124 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(None)) => {
8125 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
8130 // We expect all preimages to be consumed above
8131 if iter.next().is_some() {
8132 return Err(DecodeError::InvalidValue);
8136 let chan_features = channel_type.as_ref().unwrap();
8137 if !chan_features.is_subset(our_supported_features) {
8138 // If the channel was written by a new version and negotiated with features we don't
8139 // understand yet, refuse to read it.
8140 return Err(DecodeError::UnknownRequiredFeature);
8143 // ChannelTransactionParameters may have had an empty features set upon deserialization.
8144 // To account for that, we're proactively setting/overriding the field here.
8145 channel_parameters.channel_type_features = chan_features.clone();
8147 let mut secp_ctx = Secp256k1::new();
8148 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
8150 // `user_id` used to be a single u64 value. In order to remain backwards
8151 // compatible with versions prior to 0.0.113, the u128 is serialized as two
8152 // separate u64 values.
8153 let user_id = user_id_low as u128 + ((user_id_high_opt.unwrap_or(0) as u128) << 64);
8155 let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);
8157 if let Some(skimmed_fees) = pending_outbound_skimmed_fees_opt {
8158 let mut iter = skimmed_fees.into_iter();
8159 for htlc in pending_outbound_htlcs.iter_mut() {
8160 htlc.skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
8162 // We expect all skimmed fees to be consumed above
8163 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8165 if let Some(skimmed_fees) = holding_cell_skimmed_fees_opt {
8166 let mut iter = skimmed_fees.into_iter();
8167 for htlc in holding_cell_htlc_updates.iter_mut() {
8168 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut skimmed_fee_msat, .. } = htlc {
8169 *skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
8172 // We expect all skimmed fees to be consumed above
8173 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8175 if let Some(blinding_pts) = pending_outbound_blinding_points_opt {
8176 let mut iter = blinding_pts.into_iter();
8177 for htlc in pending_outbound_htlcs.iter_mut() {
8178 htlc.blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
8180 // We expect all blinding points to be consumed above
8181 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8183 if let Some(blinding_pts) = holding_cell_blinding_points_opt {
8184 let mut iter = blinding_pts.into_iter();
8185 for htlc in holding_cell_htlc_updates.iter_mut() {
8186 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut blinding_point, .. } = htlc {
8187 *blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
8190 // We expect all blinding points to be consumed above
8191 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8194 if let Some(malformed_htlcs) = malformed_htlcs {
8195 for (malformed_htlc_id, failure_code, sha256_of_onion) in malformed_htlcs {
8196 let htlc_idx = holding_cell_htlc_updates.iter().position(|htlc| {
8197 if let HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet } = htlc {
8198 let matches = *htlc_id == malformed_htlc_id;
8199 if matches { debug_assert!(err_packet.data.is_empty()) }
8202 }).ok_or(DecodeError::InvalidValue)?;
8203 let malformed_htlc = HTLCUpdateAwaitingACK::FailMalformedHTLC {
8204 htlc_id: malformed_htlc_id, failure_code, sha256_of_onion
8206 let _ = core::mem::replace(&mut holding_cell_htlc_updates[htlc_idx], malformed_htlc);
8211 context: ChannelContext {
8214 config: config.unwrap(),
8218 // Note that we don't care about serializing handshake limits as we only ever serialize
8219 // channel data after the handshake has completed.
8220 inbound_handshake_limits_override: None,
8223 temporary_channel_id,
8225 announcement_sigs_state: announcement_sigs_state.unwrap(),
8227 channel_value_satoshis,
8229 latest_monitor_update_id,
8231 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
8232 shutdown_scriptpubkey,
8235 cur_holder_commitment_transaction_number,
8236 cur_counterparty_commitment_transaction_number,
8239 holder_max_accepted_htlcs,
8240 pending_inbound_htlcs,
8241 pending_outbound_htlcs,
8242 holding_cell_htlc_updates,
8246 monitor_pending_channel_ready,
8247 monitor_pending_revoke_and_ack,
8248 monitor_pending_commitment_signed,
8249 monitor_pending_forwards,
8250 monitor_pending_failures,
8251 monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
8253 signer_pending_commitment_update: false,
8254 signer_pending_funding: false,
8257 holding_cell_update_fee,
8258 next_holder_htlc_id,
8259 next_counterparty_htlc_id,
8260 update_time_counter,
8263 #[cfg(debug_assertions)]
8264 holder_max_commitment_tx_output: Mutex::new((0, 0)),
8265 #[cfg(debug_assertions)]
8266 counterparty_max_commitment_tx_output: Mutex::new((0, 0)),
8268 last_sent_closing_fee: None,
8269 pending_counterparty_closing_signed: None,
8270 expecting_peer_commitment_signed: false,
8271 closing_fee_limits: None,
8272 target_closing_feerate_sats_per_kw,
8274 funding_tx_confirmed_in,
8275 funding_tx_confirmation_height,
8277 channel_creation_height: channel_creation_height.unwrap(),
8279 counterparty_dust_limit_satoshis,
8280 holder_dust_limit_satoshis,
8281 counterparty_max_htlc_value_in_flight_msat,
8282 holder_max_htlc_value_in_flight_msat: holder_max_htlc_value_in_flight_msat.unwrap(),
8283 counterparty_selected_channel_reserve_satoshis,
8284 holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis.unwrap(),
8285 counterparty_htlc_minimum_msat,
8286 holder_htlc_minimum_msat,
8287 counterparty_max_accepted_htlcs,
8290 counterparty_forwarding_info,
8292 channel_transaction_parameters: channel_parameters,
8293 funding_transaction,
8296 counterparty_cur_commitment_point,
8297 counterparty_prev_commitment_point,
8298 counterparty_node_id,
8300 counterparty_shutdown_scriptpubkey,
8304 channel_update_status,
8305 closing_signed_in_flight: false,
8309 #[cfg(any(test, fuzzing))]
8310 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
8311 #[cfg(any(test, fuzzing))]
8312 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
8314 workaround_lnd_bug_4006: None,
8315 sent_message_awaiting_response: None,
8317 latest_inbound_scid_alias,
8318 // Later in the ChannelManager deserialization phase we scan for channels and assign scid aliases if its missing
8319 outbound_scid_alias: outbound_scid_alias.unwrap_or(0),
8321 channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true),
8322 channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true),
8324 #[cfg(any(test, fuzzing))]
8325 historical_inbound_htlc_fulfills,
8327 channel_type: channel_type.unwrap(),
8330 blocked_monitor_updates: blocked_monitor_updates.unwrap(),
8339 use bitcoin::blockdata::constants::ChainHash;
8340 use bitcoin::blockdata::script::{ScriptBuf, Builder};
8341 use bitcoin::blockdata::transaction::{Transaction, TxOut};
8342 use bitcoin::blockdata::opcodes;
8343 use bitcoin::network::constants::Network;
8344 use crate::ln::onion_utils::INVALID_ONION_BLINDING;
8345 use crate::ln::{PaymentHash, PaymentPreimage};
8346 use crate::ln::channel_keys::{RevocationKey, RevocationBasepoint};
8347 use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
8348 use crate::ln::channel::InitFeatures;
8349 use crate::ln::channel::{AwaitingChannelReadyFlags, Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, HTLCUpdateAwaitingACK, commit_tx_fee_msat};
8350 use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
8351 use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
8352 use crate::ln::msgs;
8353 use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
8354 use crate::ln::script::ShutdownScript;
8355 use crate::ln::chan_utils::{self, htlc_success_tx_weight, htlc_timeout_tx_weight};
8356 use crate::chain::BestBlock;
8357 use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
8358 use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
8359 use crate::chain::transaction::OutPoint;
8360 use crate::routing::router::{Path, RouteHop};
8361 use crate::util::config::UserConfig;
8362 use crate::util::errors::APIError;
8363 use crate::util::ser::{ReadableArgs, Writeable};
8364 use crate::util::test_utils;
8365 use crate::util::test_utils::{OnGetShutdownScriptpubkey, TestKeysInterface};
8366 use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
8367 use bitcoin::secp256k1::ffi::Signature as FFISignature;
8368 use bitcoin::secp256k1::{SecretKey,PublicKey};
8369 use bitcoin::hashes::sha256::Hash as Sha256;
8370 use bitcoin::hashes::Hash;
8371 use bitcoin::hashes::hex::FromHex;
8372 use bitcoin::hash_types::WPubkeyHash;
8373 use bitcoin::blockdata::locktime::absolute::LockTime;
8374 use bitcoin::address::{WitnessProgram, WitnessVersion};
8375 use crate::prelude::*;
8377 struct TestFeeEstimator {
8380 impl FeeEstimator for TestFeeEstimator {
8381 fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 {
#[test]
fn test_max_funding_satoshis_no_wumbo() {
	// Sanity-check the non-wumbo funding cap against the total coin supply.
	assert_eq!(TOTAL_BITCOIN_SUPPLY_SATOSHIS, 21_000_000 * 100_000_000);
	assert!(MAX_FUNDING_SATOSHIS_NO_WUMBO <= TOTAL_BITCOIN_SUPPLY_SATOSHIS,
		"MAX_FUNDING_SATOSHIS_NO_WUMBO is greater than all satoshis in existence");
}
8394 signer: InMemorySigner,
8397 impl EntropySource for Keys {
8398 fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }
8401 impl SignerProvider for Keys {
8402 type EcdsaSigner = InMemorySigner;
8404 type TaprootSigner = InMemorySigner;
8406 fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
8407 self.signer.channel_keys_id()
8410 fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::EcdsaSigner {
8414 fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::EcdsaSigner, DecodeError> { panic!(); }
8416 fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()> {
8417 let secp_ctx = Secp256k1::signing_only();
8418 let channel_monitor_claim_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
8419 let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
8420 Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(channel_monitor_claim_key_hash).into_script())
8423 fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
8424 let secp_ctx = Secp256k1::signing_only();
8425 let channel_close_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
8426 Ok(ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key)))
#[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
fn public_from_secret_hex(secp_ctx: &Secp256k1<bitcoin::secp256k1::All>, hex: &str) -> PublicKey {
	// Parse the hex-encoded secret key, then derive its public key.
	let secret = SecretKey::from_slice(&<Vec<u8>>::from_hex(hex).unwrap()[..]).unwrap();
	PublicKey::from_secret_key(&secp_ctx, &secret)
}
#[test]
fn upfront_shutdown_script_incompatibility() {
	// If our peer doesn't support `option_shutdown_anysegwit`, opening a channel
	// with a non-v0-segwit upfront shutdown script must fail with
	// `IncompatibleShutdownScript`.
	let features = channelmanager::provided_init_features(&UserConfig::default()).clear_shutdown_anysegwit();
	let non_v0_segwit_shutdown_script = ShutdownScript::new_witness_program(
		&WitnessProgram::new(WitnessVersion::V16, &[0, 40]).unwrap(),
	).unwrap();

	let seed = [42; 32];
	let network = Network::Testnet;
	let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
	keys_provider.expect(OnGetShutdownScriptpubkey {
		returns: non_v0_segwit_shutdown_script.clone(),
	});

	let secp_ctx = Secp256k1::new();
	let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
	let config = UserConfig::default();
	match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42, None) {
		Err(APIError::IncompatibleShutdownScript { script }) => {
			assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
		},
		Err(e) => panic!("Unexpected error: {:?}", e),
		Ok(_) => panic!("Expected error"),
	}
}
// Check that, during channel creation, we use the same feerate in the open channel message
// as we do in the Channel object creation itself.
#[test]
fn test_open_channel_msg_fee() {
	let original_fee = 253;
	let mut fee_est = TestFeeEstimator{fee_est: original_fee };
	let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
	let secp_ctx = Secp256k1::new();
	let seed = [42; 32];
	let network = Network::Testnet;
	let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

	let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
	let config = UserConfig::default();
	let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

	// Now change the fee so we can check that the fee in the open_channel message is the
	// same as the old fee.
	fee_est.fee_est = 500;
	let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
	assert_eq!(open_channel_msg.feerate_per_kw, original_fee);
}
#[test]
fn test_holder_vs_counterparty_dust_limit() {
	// Test that when calculating the local and remote commitment transaction fees, the correct
	// dust limits are used.
	let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
	let secp_ctx = Secp256k1::new();
	let seed = [42; 32];
	let network = Network::Testnet;
	let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
	let logger = test_utils::TestLogger::new();
	let best_block = BestBlock::from_network(network);

	// Go through the flow of opening a channel between two nodes, making sure
	// they have different dust limits.

	// Create Node A's channel pointing to Node B's pubkey
	let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
	let config = UserConfig::default();
	let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

	// Create Node B's channel by receiving Node A's open_channel message
	// Make sure A's dust limit is as we expect.
	let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
	let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
	let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();

	// Node B --> Node A: accept channel, explicitly setting B's dust limit.
	let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
	accept_channel_msg.dust_limit_satoshis = 546;
	node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
	node_a_chan.context.holder_dust_limit_satoshis = 1560;

	// Node A --> Node B: funding created
	let output_script = node_a_chan.context.get_funding_redeemscript();
	let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
		value: 10000000, script_pubkey: output_script.clone(),
	}]};
	let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
	let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
	let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();

	// Node B --> Node A: funding signed
	let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
	let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };

	// Put some inbound and outbound HTLCs in A's channel.
	let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
	node_a_chan.context.pending_inbound_htlcs.push(InboundHTLCOutput {
		htlc_id: 0,
		amount_msat: htlc_amount_msat,
		payment_hash: PaymentHash(Sha256::hash(&[42; 32]).to_byte_array()),
		cltv_expiry: 300000000,
		state: InboundHTLCState::Committed,
	});

	node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
		htlc_id: 1,
		amount_msat: htlc_amount_msat, // put an amount below A's dust amount but above B's.
		payment_hash: PaymentHash(Sha256::hash(&[43; 32]).to_byte_array()),
		cltv_expiry: 200000000,
		state: OutboundHTLCState::Committed,
		source: HTLCSource::OutboundRoute {
			path: Path { hops: Vec::new(), blinded_tail: None },
			session_priv: SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
			first_hop_htlc_msat: 548,
			payment_id: PaymentId([42; 32]),
		},
		skimmed_fee_msat: None,
		blinding_point: None,
	});

	// Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass
	// the dust limit check.
	let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
	let local_commit_tx_fee = node_a_chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
	let local_commit_fee_0_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.get_channel_type());
	assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs);

	// Finally, make sure that when Node A calculates the remote's commitment transaction fees, all
	// of the HTLCs are seen to be above the dust limit.
	node_a_chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
	let remote_commit_fee_3_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.get_channel_type());
	let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
	let remote_commit_tx_fee = node_a_chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
	assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs);
}
#[test]
fn test_timeout_vs_success_htlc_dust_limit() {
	// Make sure that when `next_remote_commit_tx_fee_msat` and `next_local_commit_tx_fee_msat`
	// calculate the real dust limits for HTLCs (i.e. the dust limit given by the counterparty
	// *plus* the fees paid for the HTLC) they don't swap `HTLC_SUCCESS_TX_WEIGHT` for
	// `HTLC_TIMEOUT_TX_WEIGHT`, and vice versa.
	let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 253 });
	let secp_ctx = Secp256k1::new();
	let seed = [42; 32];
	let network = Network::Testnet;
	let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

	let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
	let config = UserConfig::default();
	let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

	let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.get_channel_type());
	let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.get_channel_type());

	// If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped: then this HTLC would be
	// counted as dust when it shouldn't be.
	let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000;
	let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
	let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
	assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);

	// If swapped: this HTLC would be counted as non-dust when it shouldn't be.
	let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000;
	let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered);
	let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
	assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);

	chan.context.channel_transaction_parameters.is_outbound_from_holder = false;

	// If swapped: this HTLC would be counted as non-dust when it shouldn't be.
	let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000;
	let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
	let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
	assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);

	// If swapped: this HTLC would be counted as dust when it shouldn't be.
	let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000;
	let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered);
	let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
	assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
}
#[test]
fn channel_reestablish_no_updates() {
	let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
	let logger = test_utils::TestLogger::new();
	let secp_ctx = Secp256k1::new();
	let seed = [42; 32];
	let network = Network::Testnet;
	let best_block = BestBlock::from_network(network);
	let chain_hash = ChainHash::using_genesis_block(network);
	let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

	// Go through the flow of opening a channel between two nodes.

	// Create Node A's channel pointing to Node B's pubkey
	let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
	let config = UserConfig::default();
	let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

	// Create Node B's channel by receiving Node A's open_channel message
	let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
	let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
	let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();

	// Node B --> Node A: accept channel
	let accept_channel_msg = node_b_chan.accept_inbound_channel();
	node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();

	// Node A --> Node B: funding created
	let output_script = node_a_chan.context.get_funding_redeemscript();
	let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
		value: 10000000, script_pubkey: output_script.clone(),
	}]};
	let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
	let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
	let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();

	// Node B --> Node A: funding signed
	let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
	let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };

	// Now disconnect the two nodes and check that the commitment point in
	// Node B's channel_reestablish message is sane.
	assert!(node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
	let msg = node_b_chan.get_channel_reestablish(&&logger);
	assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
	assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
	assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);

	// Check that the commitment point in Node A's channel_reestablish message
	// is sane.
	assert!(node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
	let msg = node_a_chan.get_channel_reestablish(&&logger);
	assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
	assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
	assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
}
#[test]
fn test_configured_holder_max_htlc_value_in_flight() {
	let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
	let logger = test_utils::TestLogger::new();
	let secp_ctx = Secp256k1::new();
	let seed = [42; 32];
	let network = Network::Testnet;
	let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
	let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
	let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());

	let mut config_2_percent = UserConfig::default();
	config_2_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 2;
	let mut config_99_percent = UserConfig::default();
	config_99_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 99;
	let mut config_0_percent = UserConfig::default();
	config_0_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 0;
	let mut config_101_percent = UserConfig::default();
	config_101_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 101;

	// Test that `OutboundV1Channel::new` creates a channel with the correct value for
	// `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
	// which is set to the lower bound + 1 (2%) of the `channel_value`.
	let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42, None).unwrap();
	let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000;
	assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64);

	// Test with the upper bound - 1 of valid values (99%).
	let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42, None).unwrap();
	let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000;
	assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);

	let chan_1_open_channel_msg = chan_1.get_open_channel(ChainHash::using_genesis_block(network));

	// Test that `InboundV1Channel::new` creates a channel with the correct value for
	// `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
	// which is set to the lower bound - 1 (2%) of the `channel_value`.
	let chan_3 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
	let chan_3_value_msat = chan_3.context.channel_value_satoshis * 1000;
	assert_eq!(chan_3.context.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64);

	// Test with the upper bound - 1 of valid values (99%).
	let chan_4 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
	let chan_4_value_msat = chan_4.context.channel_value_satoshis * 1000;
	assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64);

	// Test that `OutboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
	// if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
	let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42, None).unwrap();
	let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000;
	assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64);

	// Test that `OutboundV1Channel::new` uses the upper bound of the configurable percentage values
	// (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
	// than 100.
	let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42, None).unwrap();
	let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000;
	assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);

	// Test that `InboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
	// if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
	let chan_7 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
	let chan_7_value_msat = chan_7.context.channel_value_satoshis * 1000;
	assert_eq!(chan_7.context.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64);

	// Test that `InboundV1Channel::new` uses the upper bound of the configurable percentage values
	// (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
	// than 100.
	let chan_8 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
	let chan_8_value_msat = chan_8.context.channel_value_satoshis * 1000;
	assert_eq!(chan_8.context.holder_max_htlc_value_in_flight_msat, chan_8_value_msat);
}
// Driver test: exercises holder-selected channel reserve computation across the
// configurable range of `their_channel_reserve_proportional_millionths`, including
// values clamped up to `MIN_THEIR_CHAN_RESERVE_SATOSHIS` and invalid combinations
// whose reserves sum to at least the full channel value. All construction and
// assertion logic lives in `test_self_and_counterparty_channel_reserve` below.
8749 fn test_configured_holder_selected_channel_reserve_satoshis() {
8751 // Test that `OutboundV1Channel::new` and `InboundV1Channel::new` create a channel with the correct
8752 // channel reserves, when `their_channel_reserve_proportional_millionths` is configured.
8753 test_self_and_counterparty_channel_reserve(10_000_000, 0.02, 0.02);
8755 // Test with valid but unreasonably high channel reserves
8756 // Requesting and accepting parties have requested for 49%-49% and 60%-30% channel reserve
8757 test_self_and_counterparty_channel_reserve(10_000_000, 0.49, 0.49);
8758 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.30);
8760 // Test with calculated channel reserve less than lower bound
8761 // i.e `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
// A 0.002% reserve on a 100k-sat channel computes below the floor; the helper
// asserts the result is clamped to `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
8762 test_self_and_counterparty_channel_reserve(100_000, 0.00002, 0.30);
8764 // Test with invalid channel reserves since sum of both is greater than or equal
// to the channel value — the helper asserts the inbound side rejects the open in
// these cases rather than completing the handshake.
8766 test_self_and_counterparty_channel_reserve(10_000_000, 0.50, 0.50);
8767 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.50);
// Helper for the reserve tests above. Opens an outbound channel whose config requests
// `outbound_selected_channel_reserve_perc` of the channel value as the counterparty's
// reserve, then has an inbound node (configured with
// `inbound_selected_channel_reserve_perc`) receive the resulting `open_channel`.
//
// Asserts that each side's holder-selected reserve equals
// max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, channel_value_satoshis * perc), and that when the
// two percentages sum to >= 1.0 (reserves would consume the whole channel) the inbound
// `InboundV1Channel::new` call fails instead.
8770 fn test_self_and_counterparty_channel_reserve(channel_value_satoshis: u64, outbound_selected_channel_reserve_perc: f64, inbound_selected_channel_reserve_perc: f64) {
8771 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15_000 });
8772 let logger = test_utils::TestLogger::new();
8773 let secp_ctx = Secp256k1::new();
8774 let seed = [42; 32];
8775 let network = Network::Testnet;
8776 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8777 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8778 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8781 let mut outbound_node_config = UserConfig::default();
// The config field is expressed in parts-per-million, hence the * 1_000_000 conversion
// from the fractional percentage the test callers pass in.
8782 outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
8783 let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42, None).unwrap();
// Outbound side: selected reserve is the configured percentage of the channel value,
// floored at MIN_THEIR_CHAN_RESERVE_SATOSHIS.
8785 let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
8786 assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);
8788 let chan_open_channel_msg = chan.get_open_channel(ChainHash::using_genesis_block(network));
8789 let mut inbound_node_config = UserConfig::default();
8790 inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
// Only when the two requested reserves leave some channel value uncommitted can the
// inbound side accept the open.
8792 if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 {
8793 let chan_inbound_node = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false).unwrap();
8795 let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64);
// The inbound node selected its own reserve and recorded the outbound node's
// requested reserve (carried in the open_channel message) as the counterparty's.
8797 assert_eq!(chan_inbound_node.context.holder_selected_channel_reserve_satoshis, expected_inbound_selected_chan_reserve);
8798 assert_eq!(chan_inbound_node.context.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve);
8800 // Channel Negotiations failed
// Reserves sum to >= 100% of the channel value: constructing the inbound channel
// from the same open_channel message must error out.
8801 let result = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false);
8802 assert!(result.is_err());
// Tests that receiving a `channel_update` message from the counterparty updates the
// channel's stored counterparty forwarding info (cltv_expiry_delta / fee parameters),
// that it does not alter our own `holder_htlc_minimum_msat`, and that re-applying an
// identical update reports "no change" (returns false).
8807 fn channel_update() {
8808 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8809 let logger = test_utils::TestLogger::new();
8810 let secp_ctx = Secp256k1::new();
8811 let seed = [42; 32];
8812 let network = Network::Testnet;
8813 let best_block = BestBlock::from_network(network);
8814 let chain_hash = ChainHash::using_genesis_block(network);
8815 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8817 // Create Node A's channel pointing to Node B's pubkey
8818 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8819 let config = UserConfig::default();
8820 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8822 // Create Node B's channel by receiving Node A's open_channel message
8823 // Make sure A's dust limit is as we expect.
8824 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8825 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8826 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8828 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
8829 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
8830 accept_channel_msg.dust_limit_satoshis = 546;
8831 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8832 node_a_chan.context.holder_dust_limit_satoshis = 1560;
8834 // Node A --> Node B: funding created
// Drive the funding handshake far enough that node A holds a funded `Channel`,
// which is the type the `channel_update` call below is exercised on.
8835 let output_script = node_a_chan.context.get_funding_redeemscript();
8836 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8837 value: 10000000, script_pubkey: output_script.clone(),
8839 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8840 let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8841 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8843 // Node B --> Node A: funding signed
8844 let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
8845 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
8847 // Make sure that receiving a channel update will update the Channel as expected.
8848 let update = ChannelUpdate {
8849 contents: UnsignedChannelUpdate {
8851 short_channel_id: 0,
8854 cltv_expiry_delta: 100,
8855 htlc_minimum_msat: 5,
8856 htlc_maximum_msat: MAX_VALUE_MSAT,
8858 fee_proportional_millionths: 11,
8859 excess_data: Vec::new(),
// Dummy signature; NOTE(review): presumably `channel_update` does not verify the
// signature here — confirm against the `Channel::channel_update` implementation.
8861 signature: Signature::from(unsafe { FFISignature::new() })
// First application of the update: returns true, i.e. the stored info changed.
8863 assert!(node_a_chan.channel_update(&update).unwrap());
8865 // The counterparty can send an update with a higher minimum HTLC, but that shouldn't
8866 // change our official htlc_minimum_msat.
8867 assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1);
8868 match node_a_chan.context.counterparty_forwarding_info() {
8870 assert_eq!(info.cltv_expiry_delta, 100);
8871 assert_eq!(info.fee_base_msat, 110);
8872 assert_eq!(info.fee_proportional_millionths, 11);
8874 None => panic!("expected counterparty forwarding info to be Some")
// Re-applying the identical update must report no change (returns false).
8877 assert!(!node_a_chan.channel_update(&update).unwrap());
// Round-trip serialization test for the newer optional HTLC fields: blinding points,
// skimmed fees, and malformed-HTLC holding-cell entries. Builds a channel, populates
// pending outbound HTLCs and holding-cell updates covering every variant/field
// combination, encodes the channel, decodes it, and asserts the HTLC state survives
// the (de)serialization unchanged.
8881 fn blinding_point_skimmed_fee_malformed_ser() {
8882 // Ensure that channel blinding points, skimmed fees, and malformed HTLCs are (de)serialized
8884 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8885 let secp_ctx = Secp256k1::new();
8886 let seed = [42; 32];
8887 let network = Network::Testnet;
8888 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8890 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8891 let config = UserConfig::default();
8892 let features = channelmanager::provided_init_features(&config);
8893 let outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None).unwrap();
// Wrap the outbound context in a full `Channel` so we can use its (de)serialization.
8894 let mut chan = Channel { context: outbound_chan.context };
// A minimal outbound-route HTLC source; the concrete values are irrelevant, they just
// need to round-trip.
8896 let dummy_htlc_source = HTLCSource::OutboundRoute {
8898 hops: vec![RouteHop {
8899 pubkey: test_utils::pubkey(2), channel_features: ChannelFeatures::empty(),
8900 node_features: NodeFeatures::empty(), short_channel_id: 0, fee_msat: 0,
8901 cltv_expiry_delta: 0, maybe_announced_channel: false,
8905 session_priv: test_utils::privkey(42),
8906 first_hop_htlc_msat: 0,
8907 payment_id: PaymentId([42; 32]),
8909 let dummy_outbound_output = OutboundHTLCOutput {
8912 payment_hash: PaymentHash([43; 32]),
8914 state: OutboundHTLCState::Committed,
8915 source: dummy_htlc_source.clone(),
8916 skimmed_fee_msat: None,
8917 blinding_point: None,
// Populate ten pending outbound HTLCs with a mix of set and unset blinding points and
// skimmed fees (which HTLCs get which depends on the index) so both the Some and None
// encodings are exercised.
8919 let mut pending_outbound_htlcs = vec![dummy_outbound_output.clone(); 10];
8920 for (idx, htlc) in pending_outbound_htlcs.iter_mut().enumerate() {
8922 htlc.blinding_point = Some(test_utils::pubkey(42 + idx as u8));
8925 htlc.skimmed_fee_msat = Some(1);
8928 chan.context.pending_outbound_htlcs = pending_outbound_htlcs.clone();
// Holding-cell templates, one per `HTLCUpdateAwaitingACK` variant.
8930 let dummy_holding_cell_add_htlc = HTLCUpdateAwaitingACK::AddHTLC {
8933 payment_hash: PaymentHash([43; 32]),
8934 source: dummy_htlc_source.clone(),
8935 onion_routing_packet: msgs::OnionPacket {
8937 public_key: Ok(test_utils::pubkey(1)),
8938 hop_data: [0; 20*65],
8941 skimmed_fee_msat: None,
8942 blinding_point: None,
8944 let dummy_holding_cell_claim_htlc = HTLCUpdateAwaitingACK::ClaimHTLC {
8945 payment_preimage: PaymentPreimage([42; 32]),
8948 let dummy_holding_cell_failed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailHTLC {
8949 htlc_id, err_packet: msgs::OnionErrorPacket { data: vec![42] }
8951 let dummy_holding_cell_malformed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailMalformedHTLC {
8952 htlc_id, failure_code: INVALID_ONION_BLINDING, sha256_of_onion: [0; 32],
// Rotate through the variants (i % 5) so the holding cell contains plain adds, claims,
// adds carrying blinding point + skimmed fee, malformed-fails, and plain fails.
8954 let mut holding_cell_htlc_updates = Vec::with_capacity(12);
8957 holding_cell_htlc_updates.push(dummy_holding_cell_add_htlc.clone());
8958 } else if i % 5 == 1 {
8959 holding_cell_htlc_updates.push(dummy_holding_cell_claim_htlc.clone());
8960 } else if i % 5 == 2 {
8961 let mut dummy_add = dummy_holding_cell_add_htlc.clone();
8962 if let HTLCUpdateAwaitingACK::AddHTLC {
8963 ref mut blinding_point, ref mut skimmed_fee_msat, ..
8964 } = &mut dummy_add {
8965 *blinding_point = Some(test_utils::pubkey(42 + i));
8966 *skimmed_fee_msat = Some(42);
8968 holding_cell_htlc_updates.push(dummy_add);
8969 } else if i % 5 == 3 {
8970 holding_cell_htlc_updates.push(dummy_holding_cell_malformed_htlc(i as u64));
8972 holding_cell_htlc_updates.push(dummy_holding_cell_failed_htlc(i as u64));
8975 chan.context.holding_cell_htlc_updates = holding_cell_htlc_updates.clone();
8977 // Encode and decode the channel and ensure that the HTLCs within are the same.
8978 let encoded_chan = chan.encode();
8979 let mut s = crate::io::Cursor::new(&encoded_chan);
8980 let mut reader = crate::util::ser::FixedLengthReader::new(&mut s, encoded_chan.len() as u64);
8981 let features = channelmanager::provided_channel_type_features(&config);
8982 let decoded_chan = Channel::read(&mut reader, (&&keys_provider, &&keys_provider, 0, &features)).unwrap();
// The decoded channel must carry exactly the HTLC state we serialized.
8983 assert_eq!(decoded_chan.context.pending_outbound_htlcs, pending_outbound_htlcs);
8984 assert_eq!(decoded_chan.context.holding_cell_htlc_updates, holding_cell_htlc_updates);
8987 #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
8989 fn outbound_commitment_test() {
8990 use bitcoin::sighash;
8991 use bitcoin::consensus::encode::serialize;
8992 use bitcoin::sighash::EcdsaSighashType;
8993 use bitcoin::hashes::hex::FromHex;
8994 use bitcoin::hash_types::Txid;
8995 use bitcoin::secp256k1::Message;
8996 use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, ecdsa::EcdsaChannelSigner};
8997 use crate::ln::PaymentPreimage;
8998 use crate::ln::channel::{HTLCOutputInCommitment ,TxCreationKeys};
8999 use crate::ln::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint};
9000 use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
9001 use crate::util::logger::Logger;
9002 use crate::sync::Arc;
9003 use core::str::FromStr;
9004 use hex::DisplayHex;
9006 // Test vectors from BOLT 3 Appendices C and F (anchors):
9007 let feeest = TestFeeEstimator{fee_est: 15000};
9008 let logger : Arc<dyn Logger> = Arc::new(test_utils::TestLogger::new());
9009 let secp_ctx = Secp256k1::new();
9011 let mut signer = InMemorySigner::new(
9013 SecretKey::from_slice(&<Vec<u8>>::from_hex("30ff4956bbdd3222d44cc5e8a1261dab1e07957bdac5ae88fe3261ef321f3749").unwrap()[..]).unwrap(),
9014 SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
9015 SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
9016 SecretKey::from_slice(&<Vec<u8>>::from_hex("3333333333333333333333333333333333333333333333333333333333333333").unwrap()[..]).unwrap(),
9017 SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
9019 // These aren't set in the test vectors:
9020 [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff],
9026 assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..],
9027 <Vec<u8>>::from_hex("023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb").unwrap()[..]);
9028 let keys_provider = Keys { signer: signer.clone() };
9030 let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9031 let mut config = UserConfig::default();
9032 config.channel_handshake_config.announced_channel = false;
9033 let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42, None).unwrap(); // Nothing uses their network key in this test
9034 chan.context.holder_dust_limit_satoshis = 546;
9035 chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in in accept_channel
9037 let funding_info = OutPoint{ txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
9039 let counterparty_pubkeys = ChannelPublicKeys {
9040 funding_pubkey: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
9041 revocation_basepoint: RevocationBasepoint::from(PublicKey::from_slice(&<Vec<u8>>::from_hex("02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27").unwrap()[..]).unwrap()),
9042 payment_point: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"),
9043 delayed_payment_basepoint: DelayedPaymentBasepoint::from(public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13")),
9044 htlc_basepoint: HtlcBasepoint::from(public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"))
9046 chan.context.channel_transaction_parameters.counterparty_parameters = Some(
9047 CounterpartyChannelTransactionParameters {
9048 pubkeys: counterparty_pubkeys.clone(),
9049 selected_contest_delay: 144
9051 chan.context.channel_transaction_parameters.funding_outpoint = Some(funding_info);
9052 signer.provide_channel_parameters(&chan.context.channel_transaction_parameters);
9054 assert_eq!(counterparty_pubkeys.payment_point.serialize()[..],
9055 <Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
9057 assert_eq!(counterparty_pubkeys.funding_pubkey.serialize()[..],
9058 <Vec<u8>>::from_hex("030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c1").unwrap()[..]);
9060 assert_eq!(counterparty_pubkeys.htlc_basepoint.to_public_key().serialize()[..],
9061 <Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
9063 // We can't just use build_holder_transaction_keys here as the per_commitment_secret is not
9064 // derived from a commitment_seed, so instead we copy it here and call
9065 // build_commitment_transaction.
9066 let delayed_payment_base = &chan.context.holder_signer.as_ref().pubkeys().delayed_payment_basepoint;
9067 let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
9068 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
9069 let htlc_basepoint = &chan.context.holder_signer.as_ref().pubkeys().htlc_basepoint;
9070 let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint);
9072 macro_rules! test_commitment {
9073 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
9074 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::only_static_remote_key();
9075 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::only_static_remote_key(), $($remain)*);
9079 macro_rules! test_commitment_with_anchors {
9080 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
9081 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9082 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), $($remain)*);
9086 macro_rules! test_commitment_common {
9087 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $opt_anchors: expr, {
9088 $( { $htlc_idx: expr, $counterparty_htlc_sig_hex: expr, $htlc_sig_hex: expr, $htlc_tx_hex: expr } ), *
9090 let (commitment_tx, htlcs): (_, Vec<HTLCOutputInCommitment>) = {
9091 let mut commitment_stats = chan.context.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, &logger);
9093 let htlcs = commitment_stats.htlcs_included.drain(..)
9094 .filter_map(|(htlc, _)| if htlc.transaction_output_index.is_some() { Some(htlc) } else { None })
9096 (commitment_stats.tx, htlcs)
9098 let trusted_tx = commitment_tx.trust();
9099 let unsigned_tx = trusted_tx.built_transaction();
9100 let redeemscript = chan.context.get_funding_redeemscript();
9101 let counterparty_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_sig_hex).unwrap()[..]).unwrap();
9102 let sighash = unsigned_tx.get_sighash_all(&redeemscript, chan.context.channel_value_satoshis);
9103 log_trace!(logger, "unsigned_tx = {}", serialize(&unsigned_tx.transaction).as_hex());
9104 assert!(secp_ctx.verify_ecdsa(&sighash, &counterparty_signature, chan.context.counterparty_funding_pubkey()).is_ok(), "verify counterparty commitment sig");
9106 let mut per_htlc: Vec<(HTLCOutputInCommitment, Option<Signature>)> = Vec::new();
9107 per_htlc.clear(); // Don't warn about excess mut for no-HTLC calls
9108 let mut counterparty_htlc_sigs = Vec::new();
9109 counterparty_htlc_sigs.clear(); // Don't warn about excess mut for no-HTLC calls
9111 let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
9112 per_htlc.push((htlcs[$htlc_idx].clone(), Some(remote_signature)));
9113 counterparty_htlc_sigs.push(remote_signature);
9115 assert_eq!(htlcs.len(), per_htlc.len());
9117 let holder_commitment_tx = HolderCommitmentTransaction::new(
9118 commitment_tx.clone(),
9119 counterparty_signature,
9120 counterparty_htlc_sigs,
9121 &chan.context.holder_signer.as_ref().pubkeys().funding_pubkey,
9122 chan.context.counterparty_funding_pubkey()
9124 let holder_sig = signer.sign_holder_commitment(&holder_commitment_tx, &secp_ctx).unwrap();
9125 assert_eq!(Signature::from_der(&<Vec<u8>>::from_hex($sig_hex).unwrap()[..]).unwrap(), holder_sig, "holder_sig");
9127 let funding_redeemscript = chan.context.get_funding_redeemscript();
9128 let tx = holder_commitment_tx.add_holder_sig(&funding_redeemscript, holder_sig);
9129 assert_eq!(serialize(&tx)[..], <Vec<u8>>::from_hex($tx_hex).unwrap()[..], "tx");
9131 // ((htlc, counterparty_sig), (index, holder_sig))
9132 let mut htlc_counterparty_sig_iter = holder_commitment_tx.counterparty_htlc_sigs.iter();
9135 log_trace!(logger, "verifying htlc {}", $htlc_idx);
9136 let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
9138 let ref htlc = htlcs[$htlc_idx];
9139 let mut htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw,
9140 chan.context.get_counterparty_selected_contest_delay().unwrap(),
9141 &htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
9142 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
9143 let htlc_sighashtype = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
9144 let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
9145 assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key.to_public_key()).is_ok(), "verify counterparty htlc sig");
9147 let mut preimage: Option<PaymentPreimage> = None;
9150 let out = PaymentHash(Sha256::hash(&[i; 32]).to_byte_array());
9151 if out == htlc.payment_hash {
9152 preimage = Some(PaymentPreimage([i; 32]));
9156 assert!(preimage.is_some());
9159 let htlc_counterparty_sig = htlc_counterparty_sig_iter.next().unwrap();
9160 let htlc_holder_sig = signer.sign_holder_htlc_transaction(&htlc_tx, 0, &HTLCDescriptor {
9161 channel_derivation_parameters: ChannelDerivationParameters {
9162 value_satoshis: chan.context.channel_value_satoshis,
9163 keys_id: chan.context.channel_keys_id,
9164 transaction_parameters: chan.context.channel_transaction_parameters.clone(),
9166 commitment_txid: trusted_tx.txid(),
9167 per_commitment_number: trusted_tx.commitment_number(),
9168 per_commitment_point: trusted_tx.per_commitment_point(),
9169 feerate_per_kw: trusted_tx.feerate_per_kw(),
9171 preimage: preimage.clone(),
9172 counterparty_sig: *htlc_counterparty_sig,
9173 }, &secp_ctx).unwrap();
9174 let num_anchors = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { 2 } else { 0 };
9175 assert_eq!(htlc.transaction_output_index, Some($htlc_idx + num_anchors), "output index");
9177 let signature = Signature::from_der(&<Vec<u8>>::from_hex($htlc_sig_hex).unwrap()[..]).unwrap();
9178 assert_eq!(signature, htlc_holder_sig, "htlc sig");
9179 let trusted_tx = holder_commitment_tx.trust();
9180 htlc_tx.input[0].witness = trusted_tx.build_htlc_input_witness($htlc_idx, htlc_counterparty_sig, &htlc_holder_sig, &preimage);
9181 log_trace!(logger, "htlc_tx = {}", serialize(&htlc_tx).as_hex());
9182 assert_eq!(serialize(&htlc_tx)[..], <Vec<u8>>::from_hex($htlc_tx_hex).unwrap()[..], "htlc tx");
9184 assert!(htlc_counterparty_sig_iter.next().is_none());
9188 // anchors: simple commitment tx with no HTLCs and single anchor
9189 test_commitment_with_anchors!("30440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a8658",
9190 "3044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778",
9191 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f10529800000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778014730440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a865801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9193 // simple commitment tx with no HTLCs
9194 chan.context.value_to_self_msat = 7000000000;
9196 test_commitment!("3045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b0",
9197 "30440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae055647142",
9198 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48454a56a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae05564714201483045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9200 // anchors: simple commitment tx with no HTLCs
9201 test_commitment_with_anchors!("3045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f3",
9202 "30450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f7",
9203 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a508b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f701483045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9205 chan.context.pending_inbound_htlcs.push({
9206 let mut out = InboundHTLCOutput{
9208 amount_msat: 1000000,
9210 payment_hash: PaymentHash([0; 32]),
9211 state: InboundHTLCState::Committed,
9213 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap()).to_byte_array();
9216 chan.context.pending_inbound_htlcs.push({
9217 let mut out = InboundHTLCOutput{
9219 amount_msat: 2000000,
9221 payment_hash: PaymentHash([0; 32]),
9222 state: InboundHTLCState::Committed,
9224 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
9227 chan.context.pending_outbound_htlcs.push({
9228 let mut out = OutboundHTLCOutput{
9230 amount_msat: 2000000,
9232 payment_hash: PaymentHash([0; 32]),
9233 state: OutboundHTLCState::Committed,
9234 source: HTLCSource::dummy(),
9235 skimmed_fee_msat: None,
9236 blinding_point: None,
9238 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).to_byte_array();
9241 chan.context.pending_outbound_htlcs.push({
9242 let mut out = OutboundHTLCOutput{
9244 amount_msat: 3000000,
9246 payment_hash: PaymentHash([0; 32]),
9247 state: OutboundHTLCState::Committed,
9248 source: HTLCSource::dummy(),
9249 skimmed_fee_msat: None,
9250 blinding_point: None,
9252 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).to_byte_array();
9255 chan.context.pending_inbound_htlcs.push({
9256 let mut out = InboundHTLCOutput{
9258 amount_msat: 4000000,
9260 payment_hash: PaymentHash([0; 32]),
9261 state: InboundHTLCState::Committed,
9263 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0404040404040404040404040404040404040404040404040404040404040404").unwrap()).to_byte_array();
9267 // commitment tx with all five HTLCs untrimmed (minimum feerate)
9268 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9269 chan.context.feerate_per_kw = 0;
9271 test_commitment!("3044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e5",
9272 "304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea",
9273 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e0a06a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea01473044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9276 "3045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b",
9277 "30440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce",
9278 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b00000000000000000001e8030000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b014730440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
9281 "30440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f896004",
9282 "3045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f",
9283 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b01000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f89600401483045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9286 "30440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d43352",
9287 "3045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa",
9288 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b02000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d4335201483045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9291 "304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c1363",
9292 "304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac7487",
9293 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b03000000000000000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c13630147304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac748701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9296 "3044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b87",
9297 "3045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95",
9298 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b04000000000000000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b8701483045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9301 // commitment tx with seven outputs untrimmed (maximum feerate)
9302 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9303 chan.context.feerate_per_kw = 647;
9305 test_commitment!("3045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee",
9306 "30450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb7",
9307 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e09c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb701483045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9310 "30450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f",
9311 "30440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b",
9312 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f014730440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
9315 "304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c",
9316 "30450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f",
9317 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0100000000000000000124060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c014830450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9320 "30440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c8",
9321 "3045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673",
9322 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe020000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c801483045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9325 "304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c64",
9326 "3045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee",
9327 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe030000000000000000010c0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c6401483045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9330 "30450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca",
9331 "3044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9",
9332 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe04000000000000000001da0d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca01473044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9335 // commitment tx with six outputs untrimmed (minimum feerate)
9336 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9337 chan.context.feerate_per_kw = 648;
9339 test_commitment!("304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e5",
9340 "3045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e9",
9341 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4844e9d6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e90147304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9344 "3045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e0",
9345 "304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec6",
9346 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10000000000000000000123060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e00148304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9349 "304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d384124",
9350 "3044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae",
9351 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10100000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d38412401473044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9354 "304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc625532989",
9355 "3045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e2260796",
9356 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf1020000000000000000010b0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc62553298901483045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e226079601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9359 "3044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be",
9360 "3045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6",
9361 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf103000000000000000001d90d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be01483045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9364 // anchors: commitment tx with six outputs untrimmed (minimum dust limit)
9365 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9366 chan.context.feerate_per_kw = 645;
9367 chan.context.holder_dust_limit_satoshis = 1001;
9369 test_commitment_with_anchors!("3044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc312",
9370 "3045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d0051",
9371 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80084a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837ead007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5eb80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994abc996a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d005101473044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc31201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9374 "3045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc282",
9375 "3045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef09",
9376 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320002000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc28283483045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef0901008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000" },
9379 "3045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e",
9380 "3045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac",
9381 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320003000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e83483045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
9384 "3044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c1",
9385 "304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e",
9386 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320004000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c18347304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
9389 "3044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc615",
9390 "3045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226",
9391 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320005000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc61583483045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
9394 // commitment tx with six outputs untrimmed (maximum feerate)
9395 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9396 chan.context.feerate_per_kw = 2069;
9397 chan.context.holder_dust_limit_satoshis = 546;
9399 test_commitment!("304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc",
9400 "3045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e33",
9401 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48477956a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e330148304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9404 "3045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f699",
9405 "3045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d",
9406 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0000000000000000000175020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f69901483045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9409 "3045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df",
9410 "3045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61",
9411 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0100000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df01483045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9414 "3045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e0",
9415 "3044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c18",
9416 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d020000000000000000015d060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e001473044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c1801008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9419 "304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df",
9420 "3045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33",
9421 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d03000000000000000001f2090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df01483045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9424 // commitment tx with five outputs untrimmed (minimum feerate)
9425 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9426 chan.context.feerate_per_kw = 2070;
9428 test_commitment!("304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c",
9429 "3044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c1",
9430 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484da966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c10147304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9433 "304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b",
9434 "30440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e5",
9435 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff0000000000000000000174020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b014730440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e501008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9438 "3045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546",
9439 "30450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c6",
9440 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff010000000000000000015c060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546014830450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9443 "3045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504",
9444 "30440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502",
9445 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff02000000000000000001f1090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504014730440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9448 // commitment tx with five outputs untrimmed (maximum feerate)
9449 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9450 chan.context.feerate_per_kw = 2194;
9452 test_commitment!("304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb3",
9453 "3044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d398",
9454 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48440966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d3980147304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9457 "3045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de8450",
9458 "304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e0154301",
9459 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de84500148304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e015430101008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9462 "3044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a0",
9463 "3045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b77",
9464 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd010000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a001483045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b7701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9467 "3045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b",
9468 "30450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3",
9469 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd020000000000000000019a090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b014830450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9472 // commitment tx with four outputs untrimmed (minimum feerate)
9473 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9474 chan.context.feerate_per_kw = 2195;
9476 test_commitment!("304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce403",
9477 "3044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d1767",
9478 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484b8976a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d17670147304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce40301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9481 "3045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e",
9482 "3045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d76",
9483 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900000000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e01483045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d7601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9486 "3045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a",
9487 "3044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82",
9488 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900100000000000000000199090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a01473044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9491 // anchors: commitment tx with four outputs untrimmed (minimum dust limit)
9492 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9493 chan.context.feerate_per_kw = 2185;
9494 chan.context.holder_dust_limit_satoshis = 2001;
9495 let cached_channel_type = chan.context.channel_type;
9496 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9498 test_commitment_with_anchors!("3044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c0",
9499 "3045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd5",
9500 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80064a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994b80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ac5916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd501473044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9503 "304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb",
9504 "30440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f",
9505 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc02000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb834730440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
9508 "3045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd7271",
9509 "3045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688",
9510 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc03000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd727183483045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
9513 // commitment tx with four outputs untrimmed (maximum feerate)
9514 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9515 chan.context.feerate_per_kw = 3702;
9516 chan.context.holder_dust_limit_satoshis = 546;
9517 chan.context.channel_type = cached_channel_type.clone();
9519 test_commitment!("304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee40169",
9520 "3045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf75",
9521 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4846f916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf750148304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee4016901475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9524 "304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb89",
9525 "304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f",
9526 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb890147304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9529 "3044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb5817",
9530 "304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc",
9531 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0100000000000000000176050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb58170147304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9534 // commitment tx with three outputs untrimmed (minimum feerate)
9535 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9536 chan.context.feerate_per_kw = 3703;
9538 test_commitment!("3045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e",
9539 "304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e1",
9540 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484eb936a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e101483045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9543 "3045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a",
9544 "3045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05",
9545 "0200000000010120060e4a29579d429f0f27c17ee5f1ee282f20d706d6f90b63d35946d8f3029a0000000000000000000175050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a01483045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9548 // anchors: commitment tx with three outputs untrimmed (minimum dust limit)
9549 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9550 chan.context.feerate_per_kw = 3687;
9551 chan.context.holder_dust_limit_satoshis = 3001;
9552 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9554 test_commitment_with_anchors!("3045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c228377",
9555 "3045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d",
9556 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80054a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aa28b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d01483045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c22837701475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9559 "3044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c",
9560 "3045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de",
9561 "02000000000101542562b326c08e3a076d9cfca2be175041366591da334d8d513ff1686fd95a6002000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c83483045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
9564 // commitment tx with three outputs untrimmed (maximum feerate)
9565 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9566 chan.context.feerate_per_kw = 4914;
9567 chan.context.holder_dust_limit_satoshis = 546;
9568 chan.context.channel_type = cached_channel_type.clone();
9570 test_commitment!("3045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c95244",
9571 "3045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c19",
9572 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484ae8f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c1901483045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c9524401475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9575 "3045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374",
9576 "30440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10",
9577 "02000000000101a9172908eace869cc35128c31fc2ab502f72e4dff31aab23e0244c4b04b11ab00000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374014730440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9580 // commitment tx with two outputs untrimmed (minimum feerate)
9581 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9582 chan.context.feerate_per_kw = 4915;
9583 chan.context.holder_dust_limit_satoshis = 546;
9585 test_commitment!("304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a720",
9586 "30450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf5",
9587 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484fa926a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf50147304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a72001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9589 // anchors: commitment tx with two outputs untrimmed (minimum dust limit)
9590 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9591 chan.context.feerate_per_kw = 4894;
9592 chan.context.holder_dust_limit_satoshis = 4001;
9593 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9595 test_commitment_with_anchors!("3045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b95",
9596 "30450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd",
9597 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ad0886a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd01483045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b9501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9599 // commitment tx with two outputs untrimmed (maximum feerate)
9600 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9601 chan.context.feerate_per_kw = 9651180;
9602 chan.context.holder_dust_limit_satoshis = 546;
9603 chan.context.channel_type = cached_channel_type.clone();
9605 test_commitment!("304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd3",
9606 "3045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de",
9607 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b800222020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80ec0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4840400483045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de0147304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9609 // commitment tx with one output untrimmed (minimum feerate)
9610 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9611 chan.context.feerate_per_kw = 9651181;
9613 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
9614 "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
9615 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9617 // anchors: commitment tx with one output untrimmed (minimum dust limit)
9618 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9619 chan.context.feerate_per_kw = 6216010;
9620 chan.context.holder_dust_limit_satoshis = 4001;
9621 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9623 test_commitment_with_anchors!("30450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf",
9624 "30450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1",
9625 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a04004830450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1014830450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9627 // commitment tx with fee greater than funder amount
9628 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9629 chan.context.feerate_per_kw = 9651936;
9630 chan.context.holder_dust_limit_satoshis = 546;
9631 chan.context.channel_type = cached_channel_type;
9633 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
9634 "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
9635 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9637 // commitment tx with 3 htlc outputs, 2 offered having the same amount and preimage
9638 chan.context.value_to_self_msat = 7_000_000_000 - 2_000_000;
9639 chan.context.feerate_per_kw = 253;
9640 chan.context.pending_inbound_htlcs.clear();
9641 chan.context.pending_inbound_htlcs.push({
9642 let mut out = InboundHTLCOutput{
9644 amount_msat: 2000000,
9646 payment_hash: PaymentHash([0; 32]),
9647 state: InboundHTLCState::Committed,
9649 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
9652 chan.context.pending_outbound_htlcs.clear();
9653 chan.context.pending_outbound_htlcs.push({
9654 let mut out = OutboundHTLCOutput{
9656 amount_msat: 5000001,
9658 payment_hash: PaymentHash([0; 32]),
9659 state: OutboundHTLCState::Committed,
9660 source: HTLCSource::dummy(),
9661 skimmed_fee_msat: None,
9662 blinding_point: None,
9664 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
9667 chan.context.pending_outbound_htlcs.push({
9668 let mut out = OutboundHTLCOutput{
9670 amount_msat: 5000000,
9672 payment_hash: PaymentHash([0; 32]),
9673 state: OutboundHTLCState::Committed,
9674 source: HTLCSource::dummy(),
9675 skimmed_fee_msat: None,
9676 blinding_point: None,
9678 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
9682 test_commitment!("304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c8",
9683 "304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c",
9684 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2d8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121b8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121bc0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484a69f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c0147304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9687 "3045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce5",
9688 "3044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b",
9689 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec000000000000000000011f070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce501473044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9691 "3045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a",
9692 "3045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d",
9693 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec01000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a01483045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868f9010000" },
9695 "30440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc",
9696 "304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb",
9697 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec02000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc0147304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" }
9700 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9701 test_commitment_with_anchors!("3044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c725",
9702 "3045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b76",
9703 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aad9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b7601473044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c72501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9706 "30440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c2",
9707 "304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424",
9708 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c402000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c28347304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
9710 "304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c",
9711 "304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b",
9712 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40300000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c8347304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568f9010000" },
9714 "3045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a1",
9715 "3045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b",
9716 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40400000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a183483045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568fa010000" }
// Checks `chan_utils::build_commitment_secret` against the BOLT 3 Appendix D
// test vectors: each vector is a (32-byte seed, 48-bit index) pair with a
// known derived per-commitment secret.
9721 fn test_per_commitment_secret_gen() {
9722 // Test vectors from BOLT 3 Appendix D:
9724 let mut seed = [0; 32];
// Vector "generate_from_seed 0 final node": all-zero seed, index 2^48 - 1.
9725 seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap());
9726 assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
9727 <Vec<u8>>::from_hex("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap()[..]);
// Vector "generate_from_seed FF final node": all-ones seed, same index.
9729 seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap());
9730 assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
9731 <Vec<u8>>::from_hex("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()[..]);
// Alternating-bits index 0xaaa...a with the all-ones seed (exercises the
// bit-flip-per-set-bit derivation path).
9733 assert_eq!(chan_utils::build_commitment_secret(&seed, 0xaaaaaaaaaaa),
9734 <Vec<u8>>::from_hex("56f4008fb007ca9acf0e15b054d5c9fd12ee06cea347914ddbaed70d1c13a528").unwrap()[..]);
// Complementary alternating-bits index 0x555...5.
9736 assert_eq!(chan_utils::build_commitment_secret(&seed, 0x555555555555),
9737 <Vec<u8>>::from_hex("9015daaeb06dba4ccc05b91b2f73bd54405f2be9f217fbacd3c5ac2e62327d31").unwrap()[..]);
// Vector "generate_from_seed 01 last nontrivial node": 0x0101...01 seed, index 1.
9739 seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap());
9740 assert_eq!(chan_utils::build_commitment_secret(&seed, 1),
9741 <Vec<u8>>::from_hex("915c75942a26bb3a433a8ce2cb0427c29ec6c1775cfc78328b57f6ba7bfeaa9c").unwrap()[..]);
// Checks the BOLT 3 Appendix E key-derivation test vectors: starting from a
// fixed base secret and per-commitment secret, verifies the derived basepoint,
// per-commitment point, derived private key, revocation public key and
// revocation private key all match the spec's expected values.
9745 fn test_key_derivation() {
9746 // Test vectors from BOLT 3 Appendix E:
9747 let secp_ctx = Secp256k1::new();
// Fixed inputs from the spec (base_secret = 0x00..1f, per_commitment_secret = 0x1f..00).
9749 let base_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f").unwrap()[..]).unwrap();
9750 let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
// base_point = base_secret * G.
9752 let base_point = PublicKey::from_secret_key(&secp_ctx, &base_secret);
9753 assert_eq!(base_point.serialize()[..], <Vec<u8>>::from_hex("036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2").unwrap()[..]);
// per_commitment_point = per_commitment_secret * G.
9755 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
9756 assert_eq!(per_commitment_point.serialize()[..], <Vec<u8>>::from_hex("025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486").unwrap()[..]);
// localprivkey derivation (basepoint_secret tweaked by the per-commitment point).
9758 assert_eq!(chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret),
9759 SecretKey::from_slice(&<Vec<u8>>::from_hex("cbced912d3b21bf196a766651e436aff192362621ce317704ea2f75d87e7be0f").unwrap()[..]).unwrap());
// revocationpubkey derivation from the revocation basepoint and per-commitment point.
9761 assert_eq!(RevocationKey::from_basepoint(&secp_ctx, &RevocationBasepoint::from(base_point), &per_commitment_point).to_public_key().serialize()[..],
9762 <Vec<u8>>::from_hex("02916e326636d19c33f13e8c0c3a03dd157f332f3e99c317c141dd865eb01f8ff0").unwrap()[..]);
// revocationprivkey derivation (requires knowing both secrets).
9764 assert_eq!(chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_secret, &base_secret),
9765 SecretKey::from_slice(&<Vec<u8>>::from_hex("d09ffff62ddb2297ab000cc85bcb4283fdeb6aa052affbc9dddcf33b61078110").unwrap()[..]).unwrap());
// Builds an outbound channel, rewrites its `open_channel` message to request a
// channel type of `static_remote_key` + `zero_conf`, and asserts that an
// `InboundV1Channel` can still be constructed from it (i.e. a requested
// zero-conf channel type does not cause the inbound open to be rejected here).
9769 fn test_zero_conf_channel_type_support() {
9770 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9771 let secp_ctx = Secp256k1::new();
9772 let seed = [42; 32];
9773 let network = Network::Testnet;
9774 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9775 let logger = test_utils::TestLogger::new();
// Node A opens a 10_000_000 sat channel (100_000 msat pushed) towards node B.
9777 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9778 let config = UserConfig::default();
9779 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
9780 node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
// Explicitly request the zero-conf feature on top of static_remote_key.
9782 let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
9783 channel_type_features.set_zero_conf_required();
// Override the channel_type in the outgoing open_channel message before
// handing it to the inbound side.
9785 let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
9786 open_channel_msg.channel_type = Some(channel_type_features);
9787 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
// Note: is_0conf is false here — the inbound side is not itself trusting
// 0conf, it merely accepts the requested channel type.
9788 let res = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
9789 node_b_node_id, &channelmanager::provided_channel_type_features(&config),
9790 &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false);
9791 assert!(res.is_ok());
// Verifies anchors negotiation: `anchors_zero_fee_htlc_tx` only becomes the
// channel type when BOTH peers signal it; a one-sided signal falls back to a
// non-anchors type.
9795 fn test_supports_anchors_zero_htlc_tx_fee() {
9796 // Tests that if both sides support and negotiate `anchors_zero_fee_htlc_tx`, it is the
9797 // resulting `channel_type`.
9798 let secp_ctx = Secp256k1::new();
9799 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9800 let network = Network::Testnet;
9801 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
9802 let logger = test_utils::TestLogger::new();
9804 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
9805 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
// Local config opts in to negotiating anchors.
9807 let mut config = UserConfig::default();
9808 config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
9810 // It is not enough for just the initiator to signal `option_anchors_zero_fee_htlc_tx`, both
9811 // need to signal it.
// Counterparty features come from a default config (no anchors), so the
// resulting channel type must not include anchors.
9812 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9813 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9814 &channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42,
9815 &config, 0, 42, None
9817 assert!(!channel_a.context.channel_type.supports_anchors_zero_fee_htlc_tx());
// With both sides signalling, we expect static_remote_key + anchors_zero_fee_htlc_tx.
9819 let mut expected_channel_type = ChannelTypeFeatures::empty();
9820 expected_channel_type.set_static_remote_key_required();
9821 expected_channel_type.set_anchors_zero_fee_htlc_tx_required();
// Re-open with counterparty features derived from the anchors-enabled config.
9823 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9824 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9825 &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
// Drive the open through to the inbound side and check both ends agree on
// the negotiated channel type.
9829 let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9830 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
9831 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9832 &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
9833 &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9836 assert_eq!(channel_a.context.channel_type, expected_channel_type);
9837 assert_eq!(channel_b.context.channel_type, expected_channel_type);
// Verifies that legacy `option_anchors` (feature bit 20, "simple anchors",
// i.e. anchors WITHOUT zero-fee HTLC txs) is rejected when it would be
// negotiated implicitly via the InitFeatures intersection (no explicit
// channel_type in open_channel) — LDK does not support that channel type.
9841 fn test_rejects_implicit_simple_anchors() {
9842 // Tests that if `option_anchors` is being negotiated implicitly through the intersection of
9843 // each side's `InitFeatures`, it is rejected.
9844 let secp_ctx = Secp256k1::new();
9845 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9846 let network = Network::Testnet;
9847 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
9848 let logger = test_utils::TestLogger::new();
9850 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
9851 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
9853 let config = UserConfig::default();
9855 // See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
// Hand-roll InitFeatures advertising static_remote_key (bit 12) and the
// legacy option_anchors (bit 20) that LDK itself never sets.
9856 let static_remote_key_required: u64 = 1 << 12;
9857 let simple_anchors_required: u64 = 1 << 20;
9858 let raw_init_features = static_remote_key_required | simple_anchors_required;
9859 let init_features_with_simple_anchors = InitFeatures::from_le_bytes(raw_init_features.to_le_bytes().to_vec());
9861 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9862 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9863 &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
9867 // Set `channel_type` to `None` to force the implicit feature negotiation.
9868 let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9869 open_channel_msg.channel_type = None;
9871 // Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
9872 // `static_remote_key`, it will fail the channel.
9873 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
9874 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9875 &channelmanager::provided_channel_type_features(&config), &init_features_with_simple_anchors,
9876 &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9878 assert!(channel_b.is_err());
// Verifies that the legacy `option_anchors` channel type is rejected when it
// is requested EXPLICITLY: (1) an inbound peer rejects an open_channel whose
// channel_type is simple-anchors, and (2) an outbound peer rejects an
// accept_channel that tries to downgrade anchors_zero_fee_htlc_tx to
// simple-anchors.
9882 fn test_rejects_simple_anchors_channel_type() {
9883 // Tests that if `option_anchors` is being negotiated through the `channel_type` feature,
9885 let secp_ctx = Secp256k1::new();
9886 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9887 let network = Network::Testnet;
9888 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
9889 let logger = test_utils::TestLogger::new();
9891 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
9892 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
9894 let config = UserConfig::default();
9896 // See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
// Hand-roll the legacy simple-anchors feature set: static_remote_key
// (bit 12) + option_anchors (bit 20), as both InitFeatures and a
// ChannelTypeFeatures. These bits are "known" to LDK, just unsupported as
// a channel type.
9897 let static_remote_key_required: u64 = 1 << 12;
9898 let simple_anchors_required: u64 = 1 << 20;
9899 let simple_anchors_raw_features = static_remote_key_required | simple_anchors_required;
9900 let simple_anchors_init = InitFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
9901 let simple_anchors_channel_type = ChannelTypeFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
9902 assert!(!simple_anchors_init.requires_unknown_bits());
9903 assert!(!simple_anchors_channel_type.requires_unknown_bits());
9905 // First, we'll try to open a channel between A and B where A requests a channel type for
9906 // the original `option_anchors` feature (non zero fee htlc tx). This should be rejected by
9907 // B as it's not supported by LDK.
9908 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9909 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9910 &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
// Forge the open_channel to explicitly request the simple-anchors type.
9914 let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9915 open_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());
9917 let res = InboundV1Channel::<&TestKeysInterface>::new(
9918 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9919 &channelmanager::provided_channel_type_features(&config), &simple_anchors_init,
9920 &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9922 assert!(res.is_err());
9924 // Then, we'll try to open another channel where A requests a channel type for
9925 // `anchors_zero_fee_htlc_tx`. B is malicious and tries to downgrade the channel type to the
9926 // original `option_anchors` feature, which should be rejected by A as it's not supported by
9928 let mut channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9929 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init,
9930 10000000, 100000, 42, &config, 0, 42, None
9933 let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9935 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
9936 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9937 &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
9938 &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
// Simulate the malicious downgrade in B's accept_channel and check A refuses it.
9941 let mut accept_channel_msg = channel_b.get_accept_channel_message();
9942 accept_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());
9944 let res = channel_a.accept_channel(
9945 &accept_channel_msg, &config.channel_handshake_limits, &simple_anchors_init
9947 assert!(res.is_err());
9951 fn test_waiting_for_batch() {
9952 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9953 let logger = test_utils::TestLogger::new();
9954 let secp_ctx = Secp256k1::new();
9955 let seed = [42; 32];
9956 let network = Network::Testnet;
9957 let best_block = BestBlock::from_network(network);
9958 let chain_hash = ChainHash::using_genesis_block(network);
9959 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9961 let mut config = UserConfig::default();
9962 // Set trust_own_funding_0conf while ensuring we don't send channel_ready for a
9963 // channel in a batch before all channels are ready.
9964 config.channel_handshake_limits.trust_own_funding_0conf = true;
9966 // Create a channel from node a to node b that will be part of batch funding.
9967 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9968 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(
9973 &channelmanager::provided_init_features(&config),
9983 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
9984 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9985 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(
9990 &channelmanager::provided_channel_type_features(&config),
9991 &channelmanager::provided_init_features(&config),
9997 true, // Allow node b to send a 0conf channel_ready.
10000 let accept_channel_msg = node_b_chan.accept_inbound_channel();
10001 node_a_chan.accept_channel(
10002 &accept_channel_msg,
10003 &config.channel_handshake_limits,
10004 &channelmanager::provided_init_features(&config),
10007 // Fund the channel with a batch funding transaction.
10008 let output_script = node_a_chan.context.get_funding_redeemscript();
10009 let tx = Transaction {
10011 lock_time: LockTime::ZERO,
10015 value: 10000000, script_pubkey: output_script.clone(),
10018 value: 10000000, script_pubkey: Builder::new().into_script(),
10021 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
10022 let funding_created_msg = node_a_chan.get_funding_created(
10023 tx.clone(), funding_outpoint, true, &&logger,
10024 ).map_err(|_| ()).unwrap();
10025 let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(
10026 &funding_created_msg.unwrap(),
10030 ).map_err(|_| ()).unwrap();
10031 let node_b_updates = node_b_chan.monitor_updating_restored(
10039 // Receive funding_signed, but the channel will be configured to hold sending channel_ready and
10040 // broadcasting the funding transaction until the batch is ready.
10041 let res = node_a_chan.funding_signed(
10042 &funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger,
10044 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
10045 let node_a_updates = node_a_chan.monitor_updating_restored(
10052 // Our channel_ready shouldn't be sent yet, even with trust_own_funding_0conf set,
10053 // as the funding transaction depends on all channels in the batch becoming ready.
10054 assert!(node_a_updates.channel_ready.is_none());
10055 assert!(node_a_updates.funding_broadcastable.is_none());
10056 assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
10058 // It is possible to receive a 0conf channel_ready from the remote node.
10059 node_a_chan.channel_ready(
10060 &node_b_updates.channel_ready.unwrap(),
10068 node_a_chan.context.channel_state,
10069 ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH | AwaitingChannelReadyFlags::THEIR_CHANNEL_READY)
10072 // Clear the ChannelState::WaitingForBatch only when called by ChannelManager.
10073 node_a_chan.set_batch_ready();
10074 assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY));
10075 assert!(node_a_chan.check_get_channel_ready(0).is_some());