1 // This file is Copyright its original authors, visible in version control
4 // This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
5 // or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
6 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
7 // You may not use this file except in accordance with one or both of these
8 // licenses.
10 use bitcoin::blockdata::constants::ChainHash;
11 use bitcoin::blockdata::script::{Script, ScriptBuf, Builder};
12 use bitcoin::blockdata::transaction::Transaction;
14 use bitcoin::sighash::EcdsaSighashType;
15 use bitcoin::consensus::encode;
17 use bitcoin::hashes::Hash;
18 use bitcoin::hashes::sha256::Hash as Sha256;
19 use bitcoin::hashes::sha256d::Hash as Sha256d;
20 use bitcoin::hash_types::{Txid, BlockHash};
22 use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
23 use bitcoin::secp256k1::{PublicKey,SecretKey};
24 use bitcoin::secp256k1::{Secp256k1,ecdsa::Signature};
25 use bitcoin::secp256k1;
27 use crate::ln::{ChannelId, PaymentPreimage, PaymentHash};
28 use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
30 use crate::ln::msgs::DecodeError;
31 use crate::ln::script::{self, ShutdownScript};
32 use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT, ChannelShutdownState};
33 use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
34 use crate::ln::chan_utils;
35 use crate::ln::onion_utils::HTLCFailReason;
36 use crate::chain::BestBlock;
37 use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
38 use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
39 use crate::chain::transaction::{OutPoint, TransactionData};
40 use crate::sign::ecdsa::{EcdsaChannelSigner, WriteableEcdsaChannelSigner};
41 use crate::sign::{EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
42 use crate::events::ClosureReason;
43 use crate::routing::gossip::NodeId;
44 use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
45 use crate::util::logger::{Logger, Record, WithContext};
46 use crate::util::errors::APIError;
47 use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure};
48 use crate::util::scid_utils::scid_from_parts;
51 use crate::prelude::*;
52 use core::{cmp,mem,fmt};
53 use core::convert::TryInto;
55 #[cfg(any(test, fuzzing, debug_assertions))]
56 use crate::sync::Mutex;
57 use crate::sign::type_resolver::ChannelSignerType;
59 use super::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint, RevocationBasepoint};
62 pub struct ChannelValueStat {
63 pub value_to_self_msat: u64,
64 pub channel_value_msat: u64,
65 pub channel_reserve_msat: u64,
66 pub pending_outbound_htlcs_amount_msat: u64,
67 pub pending_inbound_htlcs_amount_msat: u64,
68 pub holding_cell_outbound_amount_msat: u64,
69 pub counterparty_max_htlc_value_in_flight_msat: u64, // outgoing
70 pub counterparty_dust_limit_msat: u64,
73 pub struct AvailableBalances {
74 /// The amount that would go to us if we close the channel, ignoring any on-chain fees.
75 pub balance_msat: u64,
76 /// Total amount available for our counterparty to send to us.
77 pub inbound_capacity_msat: u64,
78 /// Total amount available for us to send to our counterparty.
79 pub outbound_capacity_msat: u64,
80 /// The maximum value we can assign to the next outbound HTLC
81 pub next_outbound_htlc_limit_msat: u64,
82 /// The minimum value we can assign to the next outbound HTLC
83 pub next_outbound_htlc_minimum_msat: u64,
86 #[derive(Debug, Clone, Copy, PartialEq)]
88 // Inbound states mirroring InboundHTLCState
90 AwaitingRemoteRevokeToAnnounce,
91 // Note that we do not have a AwaitingAnnouncedRemoteRevoke variant here as it is universally
92 // handled the same as `Committed`, with the only exception in `InboundHTLCState` being the
93 // distinction of when we allow ourselves to forward the HTLC. Because we aren't "forwarding"
94 // the fee update anywhere, we can simply consider the fee update `Committed` immediately
95 // instead of setting it to AwaitingAnnouncedRemoteRevoke.
97 // Outbound state can only be `LocalAnnounced` or `Committed`
101 enum InboundHTLCRemovalReason {
102 FailRelay(msgs::OnionErrorPacket),
103 FailMalformed(([u8; 32], u16)),
104 Fulfill(PaymentPreimage),
107 enum InboundHTLCState {
108 /// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an
109 /// update_add_htlc message for this HTLC.
110 RemoteAnnounced(PendingHTLCStatus),
111 /// Included in a received commitment_signed message (implying we've
112 /// revoke_and_ack'd it), but the remote hasn't yet revoked their previous
113 /// state (see the example below). We have not yet included this HTLC in a
114 /// commitment_signed message because we are waiting on the remote's
115 /// aforementioned state revocation. One reason this missing remote RAA
116 /// (revoke_and_ack) blocks us from constructing a commitment_signed message
117 /// is because every time we create a new "state", i.e. every time we sign a
118 /// new commitment tx (see [BOLT #2]), we need a new per_commitment_point,
119 /// which are provided one-at-a-time in each RAA. E.g., the last RAA they
120 /// sent provided the per_commitment_point for our current commitment tx.
121 /// The other reason we should not send a commitment_signed without their RAA
122 /// is because their RAA serves to ACK our previous commitment_signed.
124 /// Here's an example of how an HTLC could come to be in this state:
125 /// remote --> update_add_htlc(prev_htlc) --> local
126 /// remote --> commitment_signed(prev_htlc) --> local
127 /// remote <-- revoke_and_ack <-- local
128 /// remote <-- commitment_signed(prev_htlc) <-- local
129 /// [note that here, the remote does not respond with a RAA]
130 /// remote --> update_add_htlc(this_htlc) --> local
131 /// remote --> commitment_signed(prev_htlc, this_htlc) --> local
132 /// Now `this_htlc` will be assigned this state. It's unable to be officially
133 /// accepted, i.e. included in a commitment_signed, because we're missing the
134 /// RAA that provides our next per_commitment_point. The per_commitment_point
135 /// is used to derive commitment keys, which are used to construct the
136 /// signatures in a commitment_signed message.
137 /// Implies AwaitingRemoteRevoke.
139 /// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
140 AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus),
141 /// Included in a received commitment_signed message (implying we've revoke_and_ack'd it).
142 /// We have also included this HTLC in our latest commitment_signed and are now just waiting
143 /// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the
144 /// channel (before it can then get forwarded and/or removed).
145 /// Implies AwaitingRemoteRevoke.
146 AwaitingAnnouncedRemoteRevoke(PendingHTLCStatus),
148 /// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
149 /// created it we would have put it in the holding cell instead). When they next revoke_and_ack
150 /// we'll drop it.
151 /// Note that we have to keep an eye on the HTLC until we've received a broadcastable
152 /// commitment transaction without it as otherwise we'll have to force-close the channel to
153 /// claim it before the timeout (obviously doesn't apply to revoked HTLCs that we can't claim
154 /// anyway). That said, ChannelMonitor does this for us (see
155 /// ChannelMonitor::should_broadcast_holder_commitment_txn) so we actually remove the HTLC from
156 /// our own local state before then, once we're sure that the next commitment_signed and
157 /// ChannelMonitor::provide_latest_local_commitment_tx will not include this HTLC.
158 LocalRemoved(InboundHTLCRemovalReason),
161 struct InboundHTLCOutput {
165 payment_hash: PaymentHash,
166 state: InboundHTLCState,
169 #[cfg_attr(test, derive(Clone, Debug, PartialEq))]
170 enum OutboundHTLCState {
171 /// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
172 /// created it we would have put it in the holding cell instead). When they next revoke_and_ack
173 /// we will promote to Committed (note that they may not accept it until the next time we
174 /// revoke, but we don't really care about that:
175 /// * they've revoked, so worst case we can announce an old state and get our (option on)
176 /// money back (though we won't), and,
177 /// * we'll send them a revoke when they send a commitment_signed, and since only they're
178 /// allowed to remove it, the "can only be removed once committed on both sides" requirement
179 /// doesn't matter to us and it's up to them to enforce it, worst-case they jump ahead but
180 /// we'll never get out of sync).
181 /// Note that we Box the OnionPacket as it's rather large and we don't want to blow up
182 /// OutboundHTLCOutput's size just for a temporary bit
183 LocalAnnounced(Box<msgs::OnionPacket>),
185 /// Remote removed this (outbound) HTLC. We're waiting on their commitment_signed to finalize
186 /// the change (though they'll need to revoke before we fail the payment).
187 RemoteRemoved(OutboundHTLCOutcome),
188 /// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
189 /// the remote side hasn't yet revoked their previous state, which we need them to do before we
190 /// can do any backwards failing. Implies AwaitingRemoteRevoke.
191 /// We also have not yet removed this HTLC in a commitment_signed message, and are waiting on a
192 /// remote revoke_and_ack on a previous state before we can do so.
193 AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome),
194 /// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
195 /// the remote side hasn't yet revoked their previous state, which we need them to do before we
196 /// can do any backwards failing. Implies AwaitingRemoteRevoke.
197 /// We have removed this HTLC in our latest commitment_signed and are now just waiting on a
198 /// revoke_and_ack to drop completely.
199 AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome),
203 #[cfg_attr(test, derive(Debug, PartialEq))]
204 enum OutboundHTLCOutcome {
205 /// LDK version 0.0.105+ will always fill in the preimage here.
206 Success(Option<PaymentPreimage>),
207 Failure(HTLCFailReason),
210 impl From<Option<HTLCFailReason>> for OutboundHTLCOutcome {
211 fn from(o: Option<HTLCFailReason>) -> Self {
213 None => OutboundHTLCOutcome::Success(None),
214 Some(r) => OutboundHTLCOutcome::Failure(r)
219 impl<'a> Into<Option<&'a HTLCFailReason>> for &'a OutboundHTLCOutcome {
220 fn into(self) -> Option<&'a HTLCFailReason> {
222 OutboundHTLCOutcome::Success(_) => None,
223 OutboundHTLCOutcome::Failure(ref r) => Some(r)
228 #[cfg_attr(test, derive(Clone, Debug, PartialEq))]
229 struct OutboundHTLCOutput {
233 payment_hash: PaymentHash,
234 state: OutboundHTLCState,
236 blinding_point: Option<PublicKey>,
237 skimmed_fee_msat: Option<u64>,
240 /// See AwaitingRemoteRevoke ChannelState for more info
241 #[cfg_attr(test, derive(Clone, Debug, PartialEq))]
242 enum HTLCUpdateAwaitingACK {
243 AddHTLC { // TODO: Time out if we're getting close to cltv_expiry
247 payment_hash: PaymentHash,
249 onion_routing_packet: msgs::OnionPacket,
250 // The extra fee we're skimming off the top of this HTLC.
251 skimmed_fee_msat: Option<u64>,
252 blinding_point: Option<PublicKey>,
255 payment_preimage: PaymentPreimage,
260 err_packet: msgs::OnionErrorPacket,
265 sha256_of_onion: [u8; 32],
269 macro_rules! define_state_flags {
270 ($flag_type_doc: expr, $flag_type: ident, [$(($flag_doc: expr, $flag: ident, $value: expr)),+], $extra_flags: expr) => {
271 #[doc = $flag_type_doc]
272 #[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
273 struct $flag_type(u32);
278 const $flag: $flag_type = $flag_type($value);
281 /// All flags that apply to the specified [`ChannelState`] variant.
283 const ALL: $flag_type = Self($(Self::$flag.0 | )* $extra_flags);
286 fn new() -> Self { Self(0) }
289 fn from_u32(flags: u32) -> Result<Self, ()> {
290 if flags & !Self::ALL.0 != 0 {
293 Ok($flag_type(flags))
298 fn is_empty(&self) -> bool { self.0 == 0 }
301 fn is_set(&self, flag: Self) -> bool { *self & flag == flag }
304 impl core::ops::Not for $flag_type {
306 fn not(self) -> Self::Output { Self(!self.0) }
308 impl core::ops::BitOr for $flag_type {
310 fn bitor(self, rhs: Self) -> Self::Output { Self(self.0 | rhs.0) }
312 impl core::ops::BitOrAssign for $flag_type {
313 fn bitor_assign(&mut self, rhs: Self) { self.0 |= rhs.0; }
315 impl core::ops::BitAnd for $flag_type {
317 fn bitand(self, rhs: Self) -> Self::Output { Self(self.0 & rhs.0) }
319 impl core::ops::BitAndAssign for $flag_type {
320 fn bitand_assign(&mut self, rhs: Self) { self.0 &= rhs.0; }
323 ($flag_type_doc: expr, $flag_type: ident, $flags: tt) => {
324 define_state_flags!($flag_type_doc, $flag_type, $flags, 0);
326 ($flag_type_doc: expr, FUNDED_STATE, $flag_type: ident, $flags: tt) => {
327 define_state_flags!($flag_type_doc, $flag_type, $flags, FundedStateFlags::ALL.0);
328 impl core::ops::BitOr<FundedStateFlags> for $flag_type {
330 fn bitor(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 | rhs.0) }
332 impl core::ops::BitOrAssign<FundedStateFlags> for $flag_type {
333 fn bitor_assign(&mut self, rhs: FundedStateFlags) { self.0 |= rhs.0; }
335 impl core::ops::BitAnd<FundedStateFlags> for $flag_type {
337 fn bitand(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 & rhs.0) }
339 impl core::ops::BitAndAssign<FundedStateFlags> for $flag_type {
340 fn bitand_assign(&mut self, rhs: FundedStateFlags) { self.0 &= rhs.0; }
342 impl PartialEq<FundedStateFlags> for $flag_type {
343 fn eq(&self, other: &FundedStateFlags) -> bool { self.0 == other.0 }
345 impl From<FundedStateFlags> for $flag_type {
346 fn from(flags: FundedStateFlags) -> Self { Self(flags.0) }
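// Illustrative note (not from upstream LDK): the flag types generated by this macro behave like
// small bit sets. For example, with the `FundedStateFlags` type defined below:
//   let flags = FundedStateFlags::PEER_DISCONNECTED | FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS;
//   assert!(flags.is_set(FundedStateFlags::PEER_DISCONNECTED));
//   assert!(!flags.is_set(FundedStateFlags::LOCAL_SHUTDOWN_SENT));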
351 /// We declare all the states/flags here together to help determine which bits are still available
354 pub const OUR_INIT_SENT: u32 = 1 << 0;
355 pub const THEIR_INIT_SENT: u32 = 1 << 1;
356 pub const FUNDING_NEGOTIATED: u32 = 1 << 2;
357 pub const AWAITING_CHANNEL_READY: u32 = 1 << 3;
358 pub const THEIR_CHANNEL_READY: u32 = 1 << 4;
359 pub const OUR_CHANNEL_READY: u32 = 1 << 5;
360 pub const CHANNEL_READY: u32 = 1 << 6;
361 pub const PEER_DISCONNECTED: u32 = 1 << 7;
362 pub const MONITOR_UPDATE_IN_PROGRESS: u32 = 1 << 8;
363 pub const AWAITING_REMOTE_REVOKE: u32 = 1 << 9;
364 pub const REMOTE_SHUTDOWN_SENT: u32 = 1 << 10;
365 pub const LOCAL_SHUTDOWN_SENT: u32 = 1 << 11;
366 pub const SHUTDOWN_COMPLETE: u32 = 1 << 12;
367 pub const WAITING_FOR_BATCH: u32 = 1 << 13;
371 "Flags that apply to all [`ChannelState`] variants in which the channel is funded.",
373 ("Indicates the remote side is considered \"disconnected\" and no updates are allowed \
374 until after we've done a `channel_reestablish` dance.", PEER_DISCONNECTED, state_flags::PEER_DISCONNECTED),
375 ("Indicates the user has told us a `ChannelMonitor` update is pending async persistence \
376 somewhere and we should pause sending any outbound messages until they've managed to \
377 complete it.", MONITOR_UPDATE_IN_PROGRESS, state_flags::MONITOR_UPDATE_IN_PROGRESS),
378 ("Indicates we received a `shutdown` message from the remote end. If set, they may not add \
379 any new HTLCs to the channel, and we are expected to respond with our own `shutdown` \
380 message when possible.", REMOTE_SHUTDOWN_SENT, state_flags::REMOTE_SHUTDOWN_SENT),
381 ("Indicates we sent a `shutdown` message. At this point, we may not add any new HTLCs to \
382 the channel.", LOCAL_SHUTDOWN_SENT, state_flags::LOCAL_SHUTDOWN_SENT)
387 "Flags that only apply to [`ChannelState::NegotiatingFunding`].",
388 NegotiatingFundingFlags, [
389 ("Indicates we have (or are prepared to) send our `open_channel`/`accept_channel` message.",
390 OUR_INIT_SENT, state_flags::OUR_INIT_SENT),
391 ("Indicates we have received their `open_channel`/`accept_channel` message.",
392 THEIR_INIT_SENT, state_flags::THEIR_INIT_SENT)
397 "Flags that only apply to [`ChannelState::AwaitingChannelReady`].",
398 FUNDED_STATE, AwaitingChannelReadyFlags, [
399 ("Indicates they sent us a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
400 `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
401 THEIR_CHANNEL_READY, state_flags::THEIR_CHANNEL_READY),
402 ("Indicates we sent them a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
403 `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
404 OUR_CHANNEL_READY, state_flags::OUR_CHANNEL_READY),
405 ("Indicates the channel was funded in a batch and the broadcast of the funding transaction \
406 is being held until all channels in the batch have received `funding_signed` and have \
407 their monitors persisted.", WAITING_FOR_BATCH, state_flags::WAITING_FOR_BATCH)
412 "Flags that only apply to [`ChannelState::ChannelReady`].",
413 FUNDED_STATE, ChannelReadyFlags, [
414 ("Indicates that we have sent a `commitment_signed` but are awaiting the responding \
415 `revoke_and_ack` message. During this period, we can't generate new `commitment_signed` \
416 messages as we'd be unable to determine which HTLCs they included in their `revoke_and_ack` \
417 implicit ACK, so instead we have to hold them away temporarily to be sent later.",
418 AWAITING_REMOTE_REVOKE, state_flags::AWAITING_REMOTE_REVOKE)
422 #[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
424 /// We are negotiating the parameters required for the channel prior to funding it.
425 NegotiatingFunding(NegotiatingFundingFlags),
426 /// We have sent `funding_created` and are awaiting a `funding_signed` to advance to
427 /// `AwaitingChannelReady`. Note that this is nonsense for an inbound channel as we immediately generate
428 /// `funding_signed` upon receipt of `funding_created`, so simply skip this state.
430 /// We've received/sent `funding_created` and `funding_signed` and are thus now waiting on the
431 /// funding transaction to confirm.
432 AwaitingChannelReady(AwaitingChannelReadyFlags),
433 /// Both we and our counterparty consider the funding transaction confirmed and the channel is
435 ChannelReady(ChannelReadyFlags),
436 /// We've successfully negotiated a `closing_signed` dance. At this point, the `ChannelManager`
437 /// is about to drop us, but we store this anyway.
441 macro_rules! impl_state_flag {
442 ($get: ident, $set: ident, $clear: ident, $state_flag: expr, [$($state: ident),+]) => {
444 fn $get(&self) -> bool {
447 ChannelState::$state(flags) => flags.is_set($state_flag.into()),
456 ChannelState::$state(flags) => *flags |= $state_flag,
458 _ => debug_assert!(false, "Attempted to set flag on unexpected ChannelState"),
462 fn $clear(&mut self) {
465 ChannelState::$state(flags) => *flags &= !($state_flag),
467 _ => debug_assert!(false, "Attempted to clear flag on unexpected ChannelState"),
471 ($get: ident, $set: ident, $clear: ident, $state_flag: expr, FUNDED_STATES) => {
472 impl_state_flag!($get, $set, $clear, $state_flag, [AwaitingChannelReady, ChannelReady]);
474 ($get: ident, $set: ident, $clear: ident, $state_flag: expr, $state: ident) => {
475 impl_state_flag!($get, $set, $clear, $state_flag, [$state]);
480 fn from_u32(state: u32) -> Result<Self, ()> {
482 state_flags::FUNDING_NEGOTIATED => Ok(ChannelState::FundingNegotiated),
483 state_flags::SHUTDOWN_COMPLETE => Ok(ChannelState::ShutdownComplete),
485 if val & state_flags::AWAITING_CHANNEL_READY == state_flags::AWAITING_CHANNEL_READY {
486 AwaitingChannelReadyFlags::from_u32(val & !state_flags::AWAITING_CHANNEL_READY)
487 .map(|flags| ChannelState::AwaitingChannelReady(flags))
488 } else if val & state_flags::CHANNEL_READY == state_flags::CHANNEL_READY {
489 ChannelReadyFlags::from_u32(val & !state_flags::CHANNEL_READY)
490 .map(|flags| ChannelState::ChannelReady(flags))
491 } else if let Ok(flags) = NegotiatingFundingFlags::from_u32(val) {
492 Ok(ChannelState::NegotiatingFunding(flags))
500 fn to_u32(&self) -> u32 {
502 ChannelState::NegotiatingFunding(flags) => flags.0,
503 ChannelState::FundingNegotiated => state_flags::FUNDING_NEGOTIATED,
504 ChannelState::AwaitingChannelReady(flags) => state_flags::AWAITING_CHANNEL_READY | flags.0,
505 ChannelState::ChannelReady(flags) => state_flags::CHANNEL_READY | flags.0,
506 ChannelState::ShutdownComplete => state_flags::SHUTDOWN_COMPLETE,
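// For example (illustrative, not from upstream LDK):
// `ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY)` serializes
// to `AWAITING_CHANNEL_READY | THEIR_CHANNEL_READY = (1 << 3) | (1 << 4) = 24`, and `from_u32(24)`
// decodes back to the same variant.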
510 fn is_pre_funded_state(&self) -> bool {
511 matches!(self, ChannelState::NegotiatingFunding(_)|ChannelState::FundingNegotiated)
514 fn is_both_sides_shutdown(&self) -> bool {
515 self.is_local_shutdown_sent() && self.is_remote_shutdown_sent()
518 fn with_funded_state_flags_mask(&self) -> FundedStateFlags {
520 ChannelState::AwaitingChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
521 ChannelState::ChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
522 _ => FundedStateFlags::new(),
526 fn should_force_holding_cell(&self) -> bool {
528 ChannelState::ChannelReady(flags) =>
529 flags.is_set(ChannelReadyFlags::AWAITING_REMOTE_REVOKE) ||
530 flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into()) ||
531 flags.is_set(FundedStateFlags::PEER_DISCONNECTED.into()),
533 debug_assert!(false, "The holding cell is only valid within ChannelReady");
539 impl_state_flag!(is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected,
540 FundedStateFlags::PEER_DISCONNECTED, FUNDED_STATES);
541 impl_state_flag!(is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress,
542 FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS, FUNDED_STATES);
543 impl_state_flag!(is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent,
544 FundedStateFlags::LOCAL_SHUTDOWN_SENT, FUNDED_STATES);
545 impl_state_flag!(is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent,
546 FundedStateFlags::REMOTE_SHUTDOWN_SENT, FUNDED_STATES);
547 impl_state_flag!(is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready,
548 AwaitingChannelReadyFlags::OUR_CHANNEL_READY, AwaitingChannelReady);
549 impl_state_flag!(is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready,
550 AwaitingChannelReadyFlags::THEIR_CHANNEL_READY, AwaitingChannelReady);
551 impl_state_flag!(is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch,
552 AwaitingChannelReadyFlags::WAITING_FOR_BATCH, AwaitingChannelReady);
553 impl_state_flag!(is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke,
554 ChannelReadyFlags::AWAITING_REMOTE_REVOKE, ChannelReady);
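// Illustrative usage of the helpers generated above (hypothetical snippet, not upstream code):
//   let mut state = ChannelState::ChannelReady(ChannelReadyFlags::new());
//   state.set_peer_disconnected();
//   assert!(state.is_peer_disconnected());
//   state.clear_peer_disconnected();
//   assert!(!state.is_peer_disconnected());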
557 pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
559 pub const DEFAULT_MAX_HTLCS: u16 = 50;
561 pub(crate) fn commitment_tx_base_weight(channel_type_features: &ChannelTypeFeatures) -> u64 {
562 const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
563 const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124;
564 if channel_type_features.supports_anchors_zero_fee_htlc_tx() { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT }
568 const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
570 pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
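// Illustrative sketch only (hypothetical helper, not part of upstream LDK): the approximate total
// weight of a commitment transaction is the base weight above plus one
// `COMMITMENT_TX_WEIGHT_PER_HTLC` per non-dust HTLC, e.g. an anchors-channel commitment with three
// non-dust HTLCs weighs roughly 1124 + 3 * 172 = 1640 weight units.
#[allow(dead_code)]
fn example_commitment_tx_weight(channel_type_features: &ChannelTypeFeatures, nondust_htlcs: u64) -> u64 {
	commitment_tx_base_weight(channel_type_features) + COMMITMENT_TX_WEIGHT_PER_HTLC * nondust_htlcs
}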
572 pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;
574 /// The percentage of the channel value `holder_max_htlc_value_in_flight_msat` used to be set to,
575 /// before this was made configurable. The percentage was made configurable in LDK 0.0.107,
576 /// although LDK 0.0.104+ enabled serialization of channels with a different value set for
577 /// `holder_max_htlc_value_in_flight_msat`.
578 pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10;
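// For example (illustrative): a 1_000_000 sat channel serialized by one of those older versions
// would default to 10% of its value, i.e. a `holder_max_htlc_value_in_flight_msat` of
// 1_000_000 * 1_000 * 10 / 100 = 100_000_000 msat.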
580 /// Maximum `funding_satoshis` value according to the BOLT #2 specification, if
581 /// `option_support_large_channel` (aka wumbo channels) is not supported.
583 pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;
585 /// Total bitcoin supply in satoshis.
586 pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 1_0000_0000;
588 /// The maximum network dust limit for standard script formats. This currently represents the
589 /// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire
590 /// transaction non-standard and thus refuses to relay it.
591 /// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many
592 /// implementations use this value for their dust limit today.
593 pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546;
595 /// The maximum channel dust limit we will accept from our counterparty.
596 pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS;
598 /// The dust limit is used for both the commitment transaction outputs as well as the closing
599 /// transactions. For cooperative closing transactions, we require segwit outputs, though accept
600 /// *any* segwit scripts, which are allowed to be up to 42 bytes in length.
601 /// In order to avoid having to concern ourselves with standardness during the closing process, we
602 /// simply require our counterparty to use a dust limit which will leave any segwit output
603 /// standard.
604 /// See <https://github.com/lightning/bolts/issues/905> for more details.
605 pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;
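// Illustrative derivation (per the discussion linked above): the largest standard segwit output
// script is 42 bytes (a 2-byte version/push prefix plus a 40-byte witness program), so the output
// serializes to 8 + 1 + 42 = 51 bytes. Adding Bitcoin Core's assumed 67 bytes to spend a segwit
// output and multiplying by the default 3 sat/vbyte dust relay rate gives (51 + 67) * 3 = 354 sats.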
607 // Just a reasonable implementation-specific safe lower bound, higher than the dust limit.
608 pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;
610 /// Used to return a simple Error back to ChannelManager. Will get converted to a
611 /// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
612 /// channel_id in ChannelManager.
613 pub(super) enum ChannelError {
619 impl fmt::Debug for ChannelError {
620 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
622 &ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
623 &ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
624 &ChannelError::Close(ref e) => write!(f, "Close : {}", e),
629 impl fmt::Display for ChannelError {
630 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
632 &ChannelError::Ignore(ref e) => write!(f, "{}", e),
633 &ChannelError::Warn(ref e) => write!(f, "{}", e),
634 &ChannelError::Close(ref e) => write!(f, "{}", e),
639 pub(super) struct WithChannelContext<'a, L: Deref> where L::Target: Logger {
641 pub peer_id: Option<PublicKey>,
642 pub channel_id: Option<ChannelId>,
645 impl<'a, L: Deref> Logger for WithChannelContext<'a, L> where L::Target: Logger {
646 fn log(&self, mut record: Record) {
647 record.peer_id = self.peer_id;
648 record.channel_id = self.channel_id;
649 self.logger.log(record)
653 impl<'a, 'b, L: Deref> WithChannelContext<'a, L>
654 where L::Target: Logger {
655 pub(super) fn from<S: Deref>(logger: &'a L, context: &'b ChannelContext<S>) -> Self
656 where S::Target: SignerProvider
660 peer_id: Some(context.counterparty_node_id),
661 channel_id: Some(context.channel_id),
666 macro_rules! secp_check {
667 ($res: expr, $err: expr) => {
670 Err(_) => return Err(ChannelError::Close($err)),
675 /// The "channel disabled" bit in channel_update must be set based on whether we are connected to
676 /// our counterparty or not. However, we don't want to announce updates right away to avoid
677 /// spamming the network with updates if the connection is flapping. Instead, we "stage" updates to
678 /// our channel_update message and track the current state here.
679 /// See implementation at [`super::channelmanager::ChannelManager::timer_tick_occurred`].
680 #[derive(Clone, Copy, PartialEq)]
681 pub(super) enum ChannelUpdateStatus {
682 /// We've announced the channel as enabled and are connected to our peer.
684 /// Our channel is no longer live, but we haven't announced the channel as disabled yet.
686 /// Our channel is live again, but we haven't announced the channel as enabled yet.
688 /// We've announced the channel as disabled.
692 /// We track when we sent an `AnnouncementSignatures` to our peer in a few states, described here.
694 pub enum AnnouncementSigsState {
695 /// We have not sent our peer an `AnnouncementSignatures` yet, or our peer disconnected since
696 /// we sent the last `AnnouncementSignatures`.
698 /// We sent an `AnnouncementSignatures` to our peer since the last time our peer disconnected.
699 /// This state never appears on disk - instead we write `NotSent`.
701 /// We sent a `CommitmentSigned` after the last `AnnouncementSignatures` we sent. Because we
702 /// only ever have a single `CommitmentSigned` pending at once, if we sent one after sending
703 /// `AnnouncementSignatures` then we know the peer received our `AnnouncementSignatures` if
704 /// they send back a `RevokeAndACK`.
705 /// This state never appears on disk - instead we write `NotSent`.
707 /// We received a `RevokeAndACK`, effectively ack-ing our `AnnouncementSignatures`, at this
708 /// point we no longer need to re-send our `AnnouncementSignatures` again on reconnect.
712 /// An enum indicating whether the local or remote side offered a given HTLC.
718 /// A struct gathering stats on pending HTLCs, either inbound or outbound side.
721 pending_htlcs_value_msat: u64,
722 on_counterparty_tx_dust_exposure_msat: u64,
723 on_holder_tx_dust_exposure_msat: u64,
724 holding_cell_msat: u64,
725 on_holder_tx_holding_cell_htlcs_count: u32, // dust HTLCs *non*-included
728 /// A struct gathering stats on a commitment transaction, either local or remote.
729 struct CommitmentStats<'a> {
730 tx: CommitmentTransaction, // the transaction info
731 feerate_per_kw: u32, // the feerate included to build the transaction
732 total_fee_sat: u64, // the total fee included in the transaction
733 num_nondust_htlcs: usize, // the number of HTLC outputs (dust HTLCs *non*-included)
734 htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction
735 local_balance_msat: u64, // local balance before fees but considering dust limits
736 remote_balance_msat: u64, // remote balance before fees but considering dust limits
737 outbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
738 inbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful received HTLCs since last commitment
741 /// Used when calculating whether we or the remote can afford an additional HTLC.
742 struct HTLCCandidate {
744 origin: HTLCInitiator,
748 fn new(amount_msat: u64, origin: HTLCInitiator) -> Self {
756 /// A return value enum for get_update_fulfill_htlc. See UpdateFulfillCommitFetch variants for
757 /// description.
758 enum UpdateFulfillFetch {
760 monitor_update: ChannelMonitorUpdate,
761 htlc_value_msat: u64,
762 msg: Option<msgs::UpdateFulfillHTLC>,
767 /// The return type of get_update_fulfill_htlc_and_commit.
768 pub enum UpdateFulfillCommitFetch {
769 /// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
770 /// it in the holding cell, or re-generated the update_fulfill message after the same claim was
771 /// previously placed in the holding cell (and has since been removed).
773 /// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
774 monitor_update: ChannelMonitorUpdate,
775 /// The value of the HTLC which was claimed, in msat.
776 htlc_value_msat: u64,
778 /// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
779 /// or has been forgotten (presumably previously claimed).
783 /// The return value of `monitor_updating_restored`
784 pub(super) struct MonitorRestoreUpdates {
785 pub raa: Option<msgs::RevokeAndACK>,
786 pub commitment_update: Option<msgs::CommitmentUpdate>,
787 pub order: RAACommitmentOrder,
788 pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
789 pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
790 pub finalized_claimed_htlcs: Vec<HTLCSource>,
791 pub funding_broadcastable: Option<Transaction>,
792 pub channel_ready: Option<msgs::ChannelReady>,
793 pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
796 /// The return value of `signer_maybe_unblocked`
798 pub(super) struct SignerResumeUpdates {
799 pub commitment_update: Option<msgs::CommitmentUpdate>,
800 pub funding_signed: Option<msgs::FundingSigned>,
801 pub channel_ready: Option<msgs::ChannelReady>,
804 /// The return value of `channel_reestablish`
805 pub(super) struct ReestablishResponses {
806 pub channel_ready: Option<msgs::ChannelReady>,
807 pub raa: Option<msgs::RevokeAndACK>,
808 pub commitment_update: Option<msgs::CommitmentUpdate>,
809 pub order: RAACommitmentOrder,
810 pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
811 pub shutdown_msg: Option<msgs::Shutdown>,
814 /// The result of a shutdown that should be handled.
816 pub(crate) struct ShutdownResult {
817 /// A channel monitor update to apply.
818 pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
819 /// A list of dropped outbound HTLCs that can safely be failed backwards immediately.
820 pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
821 /// An unbroadcasted batch funding transaction id. The closure of this channel should be
822 /// propagated to the remainder of the batch.
823 pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
824 pub(crate) channel_id: ChannelId,
825 pub(crate) counterparty_node_id: PublicKey,
826 pub(crate) unbroadcasted_funding_tx: Option<Transaction>,
829 /// If the majority of the channel's funds are to the fundee and the initiator holds only just
830 /// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
831 /// initiator controls the feerate, if they then go to increase the channel fee, they may have no
832 /// balance but the fundee is unable to send a payment as the increase in fee more than drains
833 /// their reserve value. Thus, neither side can send a new HTLC and the channel becomes useless.
834 /// Thus, before sending an HTLC when we are the initiator, we check that the feerate can increase
835 /// by this multiple without hitting this case, before sending.
836 /// This multiple is effectively the maximum feerate "jump" we expect until more HTLCs flow over
837 /// the channel. Sadly, there isn't really a good number for this - if we expect to have no new
838 /// HTLCs for days we may need this to suffice for feerate increases across days, but that may
839 /// leave the channel less usable as we hold a bigger reserve.
840 #[cfg(any(fuzzing, test))]
841 pub const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
842 #[cfg(not(any(fuzzing, test)))]
843 const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
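// For example (illustrative): if the current feerate is 500 sat/kW, before sending a new outbound
// HTLC as the channel initiator we check that the commitment transaction would still be affordable
// at 2 * 500 = 1,000 sat/kW.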
845 /// If we fail to see a funding transaction confirmed on-chain within this many blocks after the
846 /// channel creation on an inbound channel, we simply force-close and move on.
847 /// This constant is the one suggested in BOLT 2.
848 pub(crate) const FUNDING_CONF_DEADLINE_BLOCKS: u32 = 2016;
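// 2016 blocks is roughly two weeks at the ~10-minute average block interval.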
850 /// In case of a concurrent update_add_htlc proposed by our counterparty, we might
851 /// not have enough balance value remaining to cover the onchain cost of this new
852 /// HTLC weight. If this happens, our counterparty fails the reception of our
853 /// commitment_signed including this new HTLC due to infringement on the channel
854 /// reserve.
855 /// To prevent this case, we compute our outbound update_fee with an HTLC buffer of
856 /// size 2. However, if the number of concurrent update_add_htlc is higher, this still
857 /// leads to a channel force-close. Ultimately, this is an issue coming from the
858 /// design of LN state machines, allowing asynchronous updates.
859 pub(crate) const CONCURRENT_INBOUND_HTLC_FEE_BUFFER: u32 = 2;
861 /// When a channel is opened, we check that the funding amount is enough to pay for relevant
862 /// commitment transaction fees, with at least this many HTLCs present on the commitment
863 /// transaction (not counting the value of the HTLCs themselves).
864 pub(crate) const MIN_AFFORDABLE_HTLC_COUNT: usize = 4;
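// Illustrative sketch only (hypothetical helper, not part of upstream LDK): this check amounts to
// requiring the funder to afford roughly feerate_per_kw * (base weight + 4 * 172) / 1000 sats of
// commitment fee. For example, a non-anchors channel opened at 2500 sat/kW needs
// 2500 * (724 + 4 * 172) / 1000 = 3530 sats of fee budget on top of reserves.
#[allow(dead_code)]
fn example_min_affordable_commit_tx_fee_sat(feerate_per_kw: u32, channel_type_features: &ChannelTypeFeatures) -> u64 {
	let weight = commitment_tx_base_weight(channel_type_features)
		+ COMMITMENT_TX_WEIGHT_PER_HTLC * MIN_AFFORDABLE_HTLC_COUNT as u64;
	feerate_per_kw as u64 * weight / 1000
}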
866 /// When a [`Channel`] has its [`ChannelConfig`] updated, its existing one is stashed for up to this
867 /// number of ticks to allow forwarding HTLCs by nodes that have yet to receive the new
868 /// ChannelUpdate prompted by the config update. This value was determined as follows:
870 /// * The expected interval between ticks (1 minute).
871 /// * The average convergence delay of updates across the network, i.e., ~300 seconds on average
872 /// for a node to see an update as seen on `<https://arxiv.org/pdf/2205.12737.pdf>`.
873 /// * `EXPIRE_PREV_CONFIG_TICKS` = convergence_delay / tick_interval
874 pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;
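// For example, with the assumptions above: ~300 seconds of convergence delay / 60 seconds per tick
// = 5 ticks.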
876 /// The number of ticks that may elapse while we're waiting for a response to a
877 /// [`msgs::RevokeAndACK`] or [`msgs::ChannelReestablish`] message before we attempt to disconnect
880 /// See [`ChannelContext::sent_message_awaiting_response`] for more information.
881 pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;
883 /// The number of ticks that may elapse while we're waiting for an unfunded outbound/inbound channel
884 /// to be promoted to a [`Channel`] since the unfunded channel was created. An unfunded channel
885 /// exceeding this age limit will be force-closed and purged from memory.
886 pub(crate) const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;
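// At the expected one-minute tick interval this corresponds to roughly one hour.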
888 /// Number of blocks needed for an output from a coinbase transaction to be spendable.
889 pub(crate) const COINBASE_MATURITY: u32 = 100;
891 struct PendingChannelMonitorUpdate {
892 update: ChannelMonitorUpdate,
895 impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
896 (0, update, required),
899 /// The `ChannelPhase` enum describes the current phase in life of a lightning channel with each of
900 /// its variants containing an appropriate channel struct.
901 pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
902 UnfundedOutboundV1(OutboundV1Channel<SP>),
903 UnfundedInboundV1(InboundV1Channel<SP>),
907 impl<'a, SP: Deref> ChannelPhase<SP> where
908 SP::Target: SignerProvider,
909 <SP::Target as SignerProvider>::EcdsaSigner: ChannelSigner,
911 pub fn context(&'a self) -> &'a ChannelContext<SP> {
913 ChannelPhase::Funded(chan) => &chan.context,
914 ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
915 ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
919 pub fn context_mut(&'a mut self) -> &'a mut ChannelContext<SP> {
921 ChannelPhase::Funded(ref mut chan) => &mut chan.context,
922 ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
923 ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
928 /// Contains all state common to unfunded inbound/outbound channels.
929 pub(super) struct UnfundedChannelContext {
930 /// A counter tracking how many ticks have elapsed since this unfunded channel was
931 /// created. If the channel has not been funded by the time this counter reaches
932 /// `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS`, it will be force-closed and purged from memory.
934 /// This is so that we don't keep channels around that haven't progressed to a funded state
935 /// in a timely manner.
936 unfunded_channel_age_ticks: usize,
939 impl UnfundedChannelContext {
940 /// Determines whether we should force-close and purge this unfunded channel from memory due to it
941 /// having reached the unfunded channel age limit.
943 /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
944 pub fn should_expire_unfunded_channel(&mut self) -> bool {
945 self.unfunded_channel_age_ticks += 1;
946 self.unfunded_channel_age_ticks >= UNFUNDED_CHANNEL_AGE_LIMIT_TICKS
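// Illustrative usage (hypothetical snippet, not upstream code), called once per timer tick:
//   if unfunded_context.should_expire_unfunded_channel() {
//       // force-close the channel and remove it from our per-peer channel map
//   }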
950 /// Contains everything about the channel including state, and various flags.
951 pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
952 config: LegacyChannelConfig,
954 // Track the previous `ChannelConfig` so that we can continue forwarding HTLCs that were
955 // constructed using it. The second element in the tuple corresponds to the number of ticks that
956 // have elapsed since the update occurred.
957 prev_config: Option<(ChannelConfig, usize)>,
959 inbound_handshake_limits_override: Option<ChannelHandshakeLimits>,
963 /// The current channel ID.
964 channel_id: ChannelId,
965 /// The temporary channel ID used during channel setup. Value kept even after transitioning to a final channel ID.
966 /// Will be `None` for channels created prior to 0.0.115.
967 temporary_channel_id: Option<ChannelId>,
968 channel_state: ChannelState,
970 // When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
971 // our peer. However, we want to make sure they received it, or else rebroadcast it when we
972 // next connect.
973 // We do so here, see `AnnouncementSigsState` for more details on the state(s).
974 // Note that a number of our tests were written prior to the behavior here which retransmits
975 // AnnouncementSignatures until after an RAA completes, so the behavior is short-circuited in
977 #[cfg(any(test, feature = "_test_utils"))]
978 pub(crate) announcement_sigs_state: AnnouncementSigsState,
979 #[cfg(not(any(test, feature = "_test_utils")))]
980 announcement_sigs_state: AnnouncementSigsState,
982 secp_ctx: Secp256k1<secp256k1::All>,
983 channel_value_satoshis: u64,
985 latest_monitor_update_id: u64,
987 holder_signer: ChannelSignerType<SP>,
988 shutdown_scriptpubkey: Option<ShutdownScript>,
989 destination_script: ScriptBuf,
991 // Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction
992 // generation start at 0 and count up...this simplifies some parts of implementation at the
993 // cost of others, but should really just be changed.
995 cur_holder_commitment_transaction_number: u64,
996 cur_counterparty_commitment_transaction_number: u64,
997 value_to_self_msat: u64, // Excluding all pending_htlcs, fees, and anchor outputs
998 pending_inbound_htlcs: Vec<InboundHTLCOutput>,
999 pending_outbound_htlcs: Vec<OutboundHTLCOutput>,
1000 holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,
1002 /// When resending CS/RAA messages on channel monitor restoration or on reconnect, we always
1003 /// need to ensure we resend them in the order we originally generated them. Note that because
1004 /// there can only ever be one in-flight CS and/or one in-flight RAA at any time, it is
1005 /// sufficient to simply set this to the opposite of any message we are generating as we
1006 /// generate it. ie when we generate a CS, we set this to RAAFirst as, if there is a pending
1007 /// in-flight RAA to resend, it will have been the first thing we generated, and thus we should
1008 /// send it first.
1009 resend_order: RAACommitmentOrder,
1011 monitor_pending_channel_ready: bool,
1012 monitor_pending_revoke_and_ack: bool,
1013 monitor_pending_commitment_signed: bool,
1015 // TODO: If a channel is drop'd, we don't know whether the `ChannelMonitor` is ultimately
1016 // responsible for some of the HTLCs here or not - we don't know whether the update in question
1017 // completed or not. We currently ignore these fields entirely when force-closing a channel,
1018 // but need to handle this somehow or we run the risk of losing HTLCs!
1019 monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
1020 monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
1021 monitor_pending_finalized_fulfills: Vec<HTLCSource>,
1023 /// If we went to send a commitment update (ie some messages then [`msgs::CommitmentSigned`])
1024 /// but our signer (initially) refused to give us a signature, we should retry at some point in
1025 /// the future when the signer indicates it may have a signature for us.
1027 /// This flag is set in such a case. Note that we don't need to persist this as we'll end up
1028 /// setting it again as a side-effect of [`Channel::channel_reestablish`].
1029 signer_pending_commitment_update: bool,
1030 /// Similar to [`Self::signer_pending_commitment_update`] but we're waiting to send either a
1031 /// [`msgs::FundingCreated`] or [`msgs::FundingSigned`] depending on if this channel is
1032 /// outbound or inbound.
1033 signer_pending_funding: bool,
1035 // pending_update_fee is filled when sending and receiving update_fee.
1037 // Because it follows the same commitment flow as HTLCs, `FeeUpdateState` is either `Outbound`
1038 // or matches a subset of the `InboundHTLCOutput` variants. It is then updated/used when
1039 // generating new commitment transactions with exactly the same criteria as inbound/outbound
1040 // HTLCs with similar state.
1041 pending_update_fee: Option<(u32, FeeUpdateState)>,
1042 // If a `send_update_fee()` call is made with ChannelState::AwaitingRemoteRevoke set, we place
1043 // it here instead of `pending_update_fee` in the same way as we place outbound HTLC updates in
1044 // `holding_cell_htlc_updates` instead of `pending_outbound_htlcs`. It is released into
1045 // `pending_update_fee` with the same criteria as outbound HTLC updates but can be updated by
1046 // further `send_update_fee` calls, dropping the previous holding cell update entirely.
1047 holding_cell_update_fee: Option<u32>,
1048 next_holder_htlc_id: u64,
1049 next_counterparty_htlc_id: u64,
1050 feerate_per_kw: u32,
1052 /// The timestamp set on our latest `channel_update` message for this channel. It is updated
1053 /// when the channel is updated in ways which may impact the `channel_update` message or when a
1054 /// new block is received, ensuring it's always at least moderately close to the current real
1055 /// time.
1056 update_time_counter: u32,
1058 #[cfg(debug_assertions)]
1059 /// Max to_local and to_remote outputs in a locally-generated commitment transaction
1060 holder_max_commitment_tx_output: Mutex<(u64, u64)>,
1061 #[cfg(debug_assertions)]
1062 /// Max to_local and to_remote outputs in a remote-generated commitment transaction
1063 counterparty_max_commitment_tx_output: Mutex<(u64, u64)>,
1065 last_sent_closing_fee: Option<(u64, Signature)>, // (fee, holder_sig)
1066 target_closing_feerate_sats_per_kw: Option<u32>,
1068 /// If our counterparty sent us a closing_signed while we were waiting for a `ChannelMonitor`
1069 /// update, we need to delay processing it until later. We do that here by simply storing the
1070 /// closing_signed message and handling it in `maybe_propose_closing_signed`.
1071 pending_counterparty_closing_signed: Option<msgs::ClosingSigned>,
1073 /// The minimum and maximum absolute fee, in satoshis, we are willing to place on the closing
1074 /// transaction. These are set once we reach `closing_negotiation_ready`.
1076 pub(crate) closing_fee_limits: Option<(u64, u64)>,
1078 closing_fee_limits: Option<(u64, u64)>,
1080 /// If we remove an HTLC (or fee update), commit, and receive our counterparty's
1081 /// `revoke_and_ack`, we remove all knowledge of said HTLC (or fee update). However, the latest
1082 /// local commitment transaction that we can broadcast still contains the HTLC (or old fee)
1083 /// until we receive a further `commitment_signed`. Thus we are not eligible for initiating the
1084 /// `closing_signed` negotiation if we're expecting a counterparty `commitment_signed`.
1086 /// To ensure we don't send a `closing_signed` too early, we track this state here, waiting
1087 /// until we see a `commitment_signed` before doing so.
1089 /// We don't bother to persist this - we anticipate this state won't last longer than a few
1090 /// milliseconds, so any accidental force-closes here should be exceedingly rare.
1091 expecting_peer_commitment_signed: bool,
1093 /// The hash of the block in which the funding transaction was included.
1094 funding_tx_confirmed_in: Option<BlockHash>,
1095 funding_tx_confirmation_height: u32,
1096 short_channel_id: Option<u64>,
1097 /// Either the height at which this channel was created or the height at which it was last
1098 /// serialized if it was serialized by versions prior to 0.0.103.
1099 /// We use this to close if funding is never broadcasted.
1100 channel_creation_height: u32,
1102 counterparty_dust_limit_satoshis: u64,
1105 pub(super) holder_dust_limit_satoshis: u64,
1107 holder_dust_limit_satoshis: u64,
1110 pub(super) counterparty_max_htlc_value_in_flight_msat: u64,
1112 counterparty_max_htlc_value_in_flight_msat: u64,
1115 pub(super) holder_max_htlc_value_in_flight_msat: u64,
1117 holder_max_htlc_value_in_flight_msat: u64,
1119 /// minimum channel reserve for self to maintain - set by them.
1120 counterparty_selected_channel_reserve_satoshis: Option<u64>,
1123 pub(super) holder_selected_channel_reserve_satoshis: u64,
1125 holder_selected_channel_reserve_satoshis: u64,
1127 counterparty_htlc_minimum_msat: u64,
1128 holder_htlc_minimum_msat: u64,
1130 pub counterparty_max_accepted_htlcs: u16,
1132 counterparty_max_accepted_htlcs: u16,
1133 holder_max_accepted_htlcs: u16,
1134 minimum_depth: Option<u32>,
1136 counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,
1138 pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
1139 funding_transaction: Option<Transaction>,
1140 is_batch_funding: Option<()>,
1142 counterparty_cur_commitment_point: Option<PublicKey>,
1143 counterparty_prev_commitment_point: Option<PublicKey>,
1144 counterparty_node_id: PublicKey,
1146 counterparty_shutdown_scriptpubkey: Option<ScriptBuf>,
1148 commitment_secrets: CounterpartyCommitmentSecrets,
1150 channel_update_status: ChannelUpdateStatus,
1151 /// Once we reach `closing_negotiation_ready`, we set this, indicating if closing_signed does
1152 /// not complete within a single timer tick (one minute), we should force-close the channel.
1153 /// This prevents us from keeping unusable channels around forever if our counterparty wishes
1155 /// Note that this field is reset to false on deserialization to give us a chance to connect to
1156 /// our peer and start the closing_signed negotiation fresh.
1157 closing_signed_in_flight: bool,
1159 /// Our counterparty's channel_announcement signatures provided in announcement_signatures.
1160 /// This can be used to rebroadcast the channel_announcement message later.
1161 announcement_sigs: Option<(Signature, Signature)>,
1163 // We save these values so we can make sure `next_local_commit_tx_fee_msat` and
1164 // `next_remote_commit_tx_fee_msat` properly predict what the next commitment transaction fee will
1165 // be, by comparing the cached values to the fee of the transaction generated by
1166 // `build_commitment_transaction`.
1167 #[cfg(any(test, fuzzing))]
1168 next_local_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
1169 #[cfg(any(test, fuzzing))]
1170 next_remote_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
1172 /// lnd has a long-standing bug where, upon reconnection, if the channel is not yet confirmed
1173 /// they will not send a channel_reestablish until the channel locks in. Then, they will send a
1174 /// channel_ready *before* sending the channel_reestablish (which is clearly a violation of
1175 /// the BOLT specs). We copy c-lightning's workaround here and simply store the channel_ready
1176 /// message until we receive a channel_reestablish.
1178 /// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
1179 pub workaround_lnd_bug_4006: Option<msgs::ChannelReady>,
1181 /// An option set when we wish to track how many ticks have elapsed while waiting for a response
1182 /// from our counterparty after sending a message. If the peer has yet to respond after reaching
1183 /// `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`, a reconnection should be attempted to try to
1184 /// unblock the state machine.
1186 /// This behavior is mostly motivated by a lnd bug in which we don't receive a message we expect
1187 /// to in a timely manner, which may lead to channels becoming unusable and/or force-closed. An
1188 /// example of such can be found at <https://github.com/lightningnetwork/lnd/issues/7682>.
1190 /// This is currently only used when waiting for a [`msgs::ChannelReestablish`] or
1191 /// [`msgs::RevokeAndACK`] message from the counterparty.
1192 sent_message_awaiting_response: Option<usize>,
1194 #[cfg(any(test, fuzzing))]
1195 // When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
1196 // corresponding HTLC on the inbound path. If, then, the outbound path channel is
1197 // disconnected and reconnected (before we've exchanged commitment_signed and revoke_and_ack
1198 // messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This
1199 // is fine, but as a sanity check when we decline to generate the second claim, we check here
1200 // that the original was a claim, and that we aren't now trying to fulfill a failed HTLC.
1201 historical_inbound_htlc_fulfills: HashSet<u64>,
1203 /// This channel's type, as negotiated during channel open
1204 channel_type: ChannelTypeFeatures,
1206 // Our counterparty can offer us SCID aliases which they will map to this channel when routing
1207 // outbound payments. These can be used in invoice route hints to avoid explicitly revealing
1208 // the channel's funding UTXO.
1210 // We also use this when sending our peer a channel_update that isn't to be broadcasted
1211 // publicly - allowing them to re-use their map of SCID -> channel for channel_update ->
1212 // associated channel mapping.
1214 // We only bother storing the most recent SCID alias at any time, though our counterparty has
1215 // to store all of them.
1216 latest_inbound_scid_alias: Option<u64>,
1218 // We always offer our counterparty a static SCID alias, which we recognize as for this channel
1219 // if we see it in HTLC forwarding instructions. We don't bother rotating the alias given we
1220 // don't currently support node id aliases and eventually privacy should be provided with
1221 // blinded paths instead of simple scid+node_id aliases.
1222 outbound_scid_alias: u64,
1224 // We track whether we already emitted a `ChannelPending` event.
1225 channel_pending_event_emitted: bool,
1227 // We track whether we already emitted a `ChannelReady` event.
1228 channel_ready_event_emitted: bool,
1230 /// The unique identifier used to re-derive the private key material for the channel through
1231 /// [`SignerProvider::derive_channel_signer`].
1232 channel_keys_id: [u8; 32],
1234 /// If we can't release a [`ChannelMonitorUpdate`] until some external action completes, we
1235 /// store it here and only release it to the `ChannelManager` once it asks for it.
1236 blocked_monitor_updates: Vec<PendingChannelMonitorUpdate>,
1239 impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
1240 /// Allowed in any state (including after shutdown)
1241 pub fn get_update_time_counter(&self) -> u32 {
1242 self.update_time_counter
1245 pub fn get_latest_monitor_update_id(&self) -> u64 {
1246 self.latest_monitor_update_id
1249 pub fn should_announce(&self) -> bool {
1250 self.config.announced_channel
1253 pub fn is_outbound(&self) -> bool {
1254 self.channel_transaction_parameters.is_outbound_from_holder
1257 /// Gets the fee we'd want to charge for adding an HTLC output to this Channel
1258 /// Allowed in any state (including after shutdown)
1259 pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 {
1260 self.config.options.forwarding_fee_base_msat
1263 /// Returns true if we've ever received a message from the remote end for this Channel
1264 pub fn have_received_message(&self) -> bool {
1265 self.channel_state > ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT)
1268 /// Returns true if this channel is fully established and not known to be closing.
1269 /// Allowed in any state (including after shutdown)
1270 pub fn is_usable(&self) -> bool {
1271 matches!(self.channel_state, ChannelState::ChannelReady(_)) &&
1272 !self.channel_state.is_local_shutdown_sent() &&
1273 !self.channel_state.is_remote_shutdown_sent() &&
1274 !self.monitor_pending_channel_ready
1277 /// Returns the current [`ChannelShutdownState`], i.e. where the channel is in its shutdown process.
1278 pub fn shutdown_state(&self) -> ChannelShutdownState {
1279 match self.channel_state {
1280 ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_) =>
1281 if self.channel_state.is_local_shutdown_sent() && !self.channel_state.is_remote_shutdown_sent() {
1282 ChannelShutdownState::ShutdownInitiated
1283 } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && !self.closing_negotiation_ready() {
1284 ChannelShutdownState::ResolvingHTLCs
1285 } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && self.closing_negotiation_ready() {
1286 ChannelShutdownState::NegotiatingClosingFee
1288 ChannelShutdownState::NotShuttingDown
1290 ChannelState::ShutdownComplete => ChannelShutdownState::ShutdownComplete,
1291 _ => ChannelShutdownState::NotShuttingDown,
1295 fn closing_negotiation_ready(&self) -> bool {
1296 let is_ready_to_close = match self.channel_state {
1297 ChannelState::AwaitingChannelReady(flags) =>
1298 flags & FundedStateFlags::ALL == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
1299 ChannelState::ChannelReady(flags) =>
1300 flags == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
1303 self.pending_inbound_htlcs.is_empty() &&
1304 self.pending_outbound_htlcs.is_empty() &&
1305 self.pending_update_fee.is_none() &&
1309 /// Returns true if this channel is currently available for use. This is a superset of
1310 /// is_usable() and considers things like the channel being temporarily disabled.
1311 /// Allowed in any state (including after shutdown)
1312 pub fn is_live(&self) -> bool {
1313 self.is_usable() && !self.channel_state.is_peer_disconnected()
1316 // Public utilities:
1318 pub fn channel_id(&self) -> ChannelId {
1322 /// Returns the `temporary_channel_id` used during channel establishment.
1324 /// Will return `None` for channels created prior to LDK version 0.0.115.
1325 pub fn temporary_channel_id(&self) -> Option<ChannelId> {
1326 self.temporary_channel_id
1329 pub fn minimum_depth(&self) -> Option<u32> {
1333 /// Gets the "user_id" value passed into the construction of this channel. It has no special
1334 /// meaning and exists only to allow users to have a persistent identifier of a channel.
1335 pub fn get_user_id(&self) -> u128 {
1339 /// Gets the channel's type
1340 pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
1344 /// Gets the channel's `short_channel_id`.
1346 /// Will return `None` if the channel hasn't been confirmed yet.
1347 pub fn get_short_channel_id(&self) -> Option<u64> {
1348 self.short_channel_id
1351 /// Allowed in any state (including after shutdown)
1352 pub fn latest_inbound_scid_alias(&self) -> Option<u64> {
1353 self.latest_inbound_scid_alias
1356 /// Allowed in any state (including after shutdown)
1357 pub fn outbound_scid_alias(&self) -> u64 {
1358 self.outbound_scid_alias
1361 /// Returns the holder signer for this channel.
1363 pub fn get_signer(&self) -> &ChannelSignerType<SP> {
1364 return &self.holder_signer
1367 /// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
1368 /// indicating we were written by LDK prior to 0.0.106 which did not set outbound SCID aliases
1369 /// or prior to any channel actions during `Channel` initialization.
1370 pub fn set_outbound_scid_alias(&mut self, outbound_scid_alias: u64) {
1371 debug_assert_eq!(self.outbound_scid_alias, 0);
1372 self.outbound_scid_alias = outbound_scid_alias;
1375 /// Returns the funding_txo we either got from our peer, or were given by
1376 /// get_funding_created.
1377 pub fn get_funding_txo(&self) -> Option<OutPoint> {
1378 self.channel_transaction_parameters.funding_outpoint
1381 /// Returns the height in which our funding transaction was confirmed.
1382 pub fn get_funding_tx_confirmation_height(&self) -> Option<u32> {
1383 let conf_height = self.funding_tx_confirmation_height;
1384 if conf_height > 0 {
1391 /// Returns the block hash in which our funding transaction was confirmed.
1392 pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
1393 self.funding_tx_confirmed_in
1396 /// Returns the current number of confirmations on the funding transaction.
1397 pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
1398 if self.funding_tx_confirmation_height == 0 {
1399 // We either haven't seen any confirmation yet, or observed a reorg.
1403 height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1)
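// Worked example (illustrative numbers only): if the funding transaction confirmed at height
// 800_000 and the current `height` is 800_005, this returns 800_005 - 800_000 + 1 = 6
// confirmations. If we haven't seen a confirmation (or observed a reorg), or `height` is below
// the confirmation height, the result is 0.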
1406 fn get_holder_selected_contest_delay(&self) -> u16 {
1407 self.channel_transaction_parameters.holder_selected_contest_delay
1410 fn get_holder_pubkeys(&self) -> &ChannelPublicKeys {
1411 &self.channel_transaction_parameters.holder_pubkeys
1414 pub fn get_counterparty_selected_contest_delay(&self) -> Option<u16> {
1415 self.channel_transaction_parameters.counterparty_parameters
1416 .as_ref().map(|params| params.selected_contest_delay)
1419 fn get_counterparty_pubkeys(&self) -> &ChannelPublicKeys {
1420 &self.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys
1423 /// Allowed in any state (including after shutdown)
1424 pub fn get_counterparty_node_id(&self) -> PublicKey {
1425 self.counterparty_node_id
1428 /// Allowed in any state (including after shutdown)
1429 pub fn get_holder_htlc_minimum_msat(&self) -> u64 {
1430 self.holder_htlc_minimum_msat
1433 /// Allowed in any state (including after shutdown), but will return none before TheirInitSent
1434 pub fn get_holder_htlc_maximum_msat(&self) -> Option<u64> {
1435 self.get_htlc_maximum_msat(self.holder_max_htlc_value_in_flight_msat)
1438 /// Allowed in any state (including after shutdown)
1439 pub fn get_announced_htlc_max_msat(&self) -> u64 {
1441 // Upper bound by capacity. We make it a bit less than full capacity to prevent attempts
1442 // to use full capacity. This is an effort to reduce routing failures, because in many cases
1443 // the channel might have been used to route very small values (either by honest users or as DoS).
1444 self.channel_value_satoshis * 1000 * 9 / 10,
1446 self.counterparty_max_htlc_value_in_flight_msat
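// Worked example (illustrative numbers only): for a 1_000_000 sat channel the capacity bound is
// 1_000_000 * 1000 * 9 / 10 = 900_000_000 msat; if the counterparty's max-in-flight value is
// 500_000_000 msat, the announced htlc_maximum_msat is the smaller value, 500_000_000 msat.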
1450 /// Allowed in any state (including after shutdown)
1451 pub fn get_counterparty_htlc_minimum_msat(&self) -> u64 {
1452 self.counterparty_htlc_minimum_msat
1455 /// Allowed in any state (including after shutdown), but will return none before TheirInitSent
1456 pub fn get_counterparty_htlc_maximum_msat(&self) -> Option<u64> {
1457 self.get_htlc_maximum_msat(self.counterparty_max_htlc_value_in_flight_msat)
1460 fn get_htlc_maximum_msat(&self, party_max_htlc_value_in_flight_msat: u64) -> Option<u64> {
1461 self.counterparty_selected_channel_reserve_satoshis.map(|counterparty_reserve| {
1462 let holder_reserve = self.holder_selected_channel_reserve_satoshis;
1464 (self.channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000,
1465 party_max_htlc_value_in_flight_msat
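// Worked example (illustrative numbers only): with a 1_000_000 sat channel and 10_000 sat
// reserves on each side, the cap is (1_000_000 - 10_000 - 10_000) * 1000 = 980_000_000 msat,
// further limited by the party's max-in-flight value (e.g. 500_000_000 msat).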
1470 pub fn get_value_satoshis(&self) -> u64 {
1471 self.channel_value_satoshis
1474 pub fn get_fee_proportional_millionths(&self) -> u32 {
1475 self.config.options.forwarding_fee_proportional_millionths
1478 pub fn get_cltv_expiry_delta(&self) -> u16 {
1479 cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
1482 pub fn get_max_dust_htlc_exposure_msat<F: Deref>(&self,
1483 fee_estimator: &LowerBoundedFeeEstimator<F>) -> u64
1484 where F::Target: FeeEstimator
1486 match self.config.options.max_dust_htlc_exposure {
1487 MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => {
1488 let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(
1489 ConfirmationTarget::OnChainSweep) as u64;
1490 feerate_per_kw.saturating_mul(multiplier)
1492 MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
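// Worked example (illustrative numbers only): with `MaxDustHTLCExposure::FeeRateMultiplier(5_000)`
// and a fee estimate of 2_500 sat/kW for `ConfirmationTarget::OnChainSweep`, the dust exposure
// limit is 2_500 * 5_000 = 12_500_000 msat; `FixedLimitMsat` simply returns the configured limit.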
1496 /// Returns the previous [`ChannelConfig`] applied to this channel, if any.
1497 pub fn prev_config(&self) -> Option<ChannelConfig> {
1498 self.prev_config.map(|prev_config| prev_config.0)
1501 // Checks whether we should emit a `ChannelPending` event.
1502 pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
1503 self.is_funding_broadcast() && !self.channel_pending_event_emitted
1506 // Returns whether we already emitted a `ChannelPending` event.
1507 pub(crate) fn channel_pending_event_emitted(&self) -> bool {
1508 self.channel_pending_event_emitted
1511 // Remembers that we already emitted a `ChannelPending` event.
1512 pub(crate) fn set_channel_pending_event_emitted(&mut self) {
1513 self.channel_pending_event_emitted = true;
1516 // Checks whether we should emit a `ChannelReady` event.
1517 pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool {
1518 self.is_usable() && !self.channel_ready_event_emitted
1521 // Remembers that we already emitted a `ChannelReady` event.
1522 pub(crate) fn set_channel_ready_event_emitted(&mut self) {
1523 self.channel_ready_event_emitted = true;
1526 /// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once
1527 /// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will
1528 /// no longer be considered when forwarding HTLCs.
1529 pub fn maybe_expire_prev_config(&mut self) {
1530 if self.prev_config.is_none() {
1533 let prev_config = self.prev_config.as_mut().unwrap();
1535 if prev_config.1 == EXPIRE_PREV_CONFIG_TICKS {
1536 self.prev_config = None;
1540 /// Returns the current [`ChannelConfig`] applied to the channel.
1541 pub fn config(&self) -> ChannelConfig {
1545 /// Updates the channel's config. A bool is returned indicating whether the applied config
1546 /// update requires a new ChannelUpdate message to be generated.
1547 pub fn update_config(&mut self, config: &ChannelConfig) -> bool {
1548 let did_channel_update =
1549 self.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths ||
1550 self.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat ||
1551 self.config.options.cltv_expiry_delta != config.cltv_expiry_delta;
1552 if did_channel_update {
1553 self.prev_config = Some((self.config.options, 0));
1554 // Update the counter, which backs the ChannelUpdate timestamp, to allow the relay
1555 // policy change to propagate throughout the network.
1556 self.update_time_counter += 1;
1558 self.config.options = *config;
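// Illustrative usage sketch of `update_config` (the caller and `new_options` below are
// hypothetical, not part of this file):
//   if channel_context.update_config(&new_options) {
//       // forwarding fees or CLTV delta changed - queue a fresh channel_update for our peer
//   }
// so that relay policy changes propagate, as noted above.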
1562 /// Returns true if funding_signed was sent/received and the
1563 /// funding transaction has been broadcast if necessary.
1564 pub fn is_funding_broadcast(&self) -> bool {
1565 !self.channel_state.is_pre_funded_state() &&
1566 !matches!(self.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH))
1569 /// Transaction nomenclature is somewhat confusing here as there are many different cases - a
1570 /// transaction is referred to as "a's transaction" implying that a will be able to broadcast
1571 /// the transaction. Thus, b will generally be sending a signature over such a transaction to
1572 /// a, and a can revoke the transaction by providing b the relevant per_commitment_secret. As
1573 /// such, a transaction is generally the result of b increasing the amount paid to a (or adding
1574 /// an HTLC to it).
1575 /// @local is used only to convert relevant internal structures which refer to remote vs local
1576 /// to decide value of outputs and direction of HTLCs.
1577 /// @generated_by_local is used to determine *which* HTLCs to include - noting that the HTLC
1578 /// state may indicate that one peer has informed the other that they'd like to add an HTLC but
1579 /// have not yet committed it. Such HTLCs will only be included in transactions which are being
1580 /// generated by the peer which proposed adding the HTLCs, and thus we need to understand both
1581 /// which peer generated this transaction and "to whom" this transaction flows.
1583 fn build_commitment_transaction<L: Deref>(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> CommitmentStats
1584 where L::Target: Logger
1586 let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new();
1587 let num_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len();
1588 let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs);
1590 let broadcaster_dust_limit_satoshis = if local { self.holder_dust_limit_satoshis } else { self.counterparty_dust_limit_satoshis };
1591 let mut remote_htlc_total_msat = 0;
1592 let mut local_htlc_total_msat = 0;
1593 let mut value_to_self_msat_offset = 0;
1595 let mut feerate_per_kw = self.feerate_per_kw;
1596 if let Some((feerate, update_state)) = self.pending_update_fee {
1597 if match update_state {
1598 // Note that these match the inclusion criteria when scanning
1599 // pending_inbound_htlcs below.
1600 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); !generated_by_local },
1601 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); !generated_by_local },
1602 FeeUpdateState::Outbound => { assert!(self.is_outbound()); generated_by_local },
1604 feerate_per_kw = feerate;
1608 log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...",
1609 commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number),
1610 get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()),
1612 if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
1614 macro_rules! get_htlc_in_commitment {
1615 ($htlc: expr, $offered: expr) => {
1616 HTLCOutputInCommitment {
1618 amount_msat: $htlc.amount_msat,
1619 cltv_expiry: $htlc.cltv_expiry,
1620 payment_hash: $htlc.payment_hash,
1621 transaction_output_index: None
1626 macro_rules! add_htlc_output {
1627 ($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => {
1628 if $outbound == local { // "offered HTLC output"
1629 let htlc_in_tx = get_htlc_in_commitment!($htlc, true);
1630 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1633 feerate_per_kw as u64 * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
1635 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1636 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1637 included_non_dust_htlcs.push((htlc_in_tx, $source));
1639 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1640 included_dust_htlcs.push((htlc_in_tx, $source));
1643 let htlc_in_tx = get_htlc_in_commitment!($htlc, false);
1644 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1647 feerate_per_kw as u64 * htlc_success_tx_weight(self.get_channel_type()) / 1000
1649 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1650 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1651 included_non_dust_htlcs.push((htlc_in_tx, $source));
1653 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1654 included_dust_htlcs.push((htlc_in_tx, $source));
1660 let mut inbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
1662 for ref htlc in self.pending_inbound_htlcs.iter() {
1663 let (include, state_name) = match htlc.state {
1664 InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"),
1665 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => (!generated_by_local, "AwaitingRemoteRevokeToAnnounce"),
1666 InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => (true, "AwaitingAnnouncedRemoteRevoke"),
1667 InboundHTLCState::Committed => (true, "Committed"),
1668 InboundHTLCState::LocalRemoved(_) => (!generated_by_local, "LocalRemoved"),
1672 add_htlc_output!(htlc, false, None, state_name);
1673 remote_htlc_total_msat += htlc.amount_msat;
1675 log_trace!(logger, " ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1677 &InboundHTLCState::LocalRemoved(ref reason) => {
1678 if generated_by_local {
1679 if let &InboundHTLCRemovalReason::Fulfill(preimage) = reason {
1680 inbound_htlc_preimages.push(preimage);
1681 value_to_self_msat_offset += htlc.amount_msat as i64;
1691 let mut outbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
1693 for ref htlc in self.pending_outbound_htlcs.iter() {
1694 let (include, state_name) = match htlc.state {
1695 OutboundHTLCState::LocalAnnounced(_) => (generated_by_local, "LocalAnnounced"),
1696 OutboundHTLCState::Committed => (true, "Committed"),
1697 OutboundHTLCState::RemoteRemoved(_) => (generated_by_local, "RemoteRemoved"),
1698 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) => (generated_by_local, "AwaitingRemoteRevokeToRemove"),
1699 OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) => (false, "AwaitingRemovedRemoteRevoke"),
1702 let preimage_opt = match htlc.state {
1703 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(p)) => p,
1704 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(p)) => p,
1705 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(p)) => p,
1709 if let Some(preimage) = preimage_opt {
1710 outbound_htlc_preimages.push(preimage);
1714 add_htlc_output!(htlc, true, Some(&htlc.source), state_name);
1715 local_htlc_total_msat += htlc.amount_msat;
1717 log_trace!(logger, " ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1719 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_))|OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => {
1720 value_to_self_msat_offset -= htlc.amount_msat as i64;
1722 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(_)) => {
1723 if !generated_by_local {
1724 value_to_self_msat_offset -= htlc.amount_msat as i64;
1732 let mut value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
1733 assert!(value_to_self_msat >= 0);
1734 // Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie
1735 // AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
1736 // "violate" their reserve value by counting those against it. Thus, we have to convert
1737 // everything to i64 before subtracting as otherwise we can overflow.
1738 let mut value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
1739 assert!(value_to_remote_msat >= 0);
1741 #[cfg(debug_assertions)]
1743 // Make sure that the to_self/to_remote is always either past the appropriate
1744 // channel_reserve *or* it is making progress towards it.
1745 let mut broadcaster_max_commitment_tx_output = if generated_by_local {
1746 self.holder_max_commitment_tx_output.lock().unwrap()
1748 self.counterparty_max_commitment_tx_output.lock().unwrap()
1750 debug_assert!(broadcaster_max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= self.counterparty_selected_channel_reserve_satoshis.unwrap() as i64);
1751 broadcaster_max_commitment_tx_output.0 = cmp::max(broadcaster_max_commitment_tx_output.0, value_to_self_msat as u64);
1752 debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= self.holder_selected_channel_reserve_satoshis as i64);
1753 broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64);
1756 let total_fee_sat = commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), &self.channel_transaction_parameters.channel_type_features);
1757 let anchors_val = if self.channel_transaction_parameters.channel_type_features.supports_anchors_zero_fee_htlc_tx() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64;
1758 let (value_to_self, value_to_remote) = if self.is_outbound() {
1759 (value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000)
1761 (value_to_self_msat / 1000, value_to_remote_msat / 1000 - anchors_val - total_fee_sat as i64)
1764 let mut value_to_a = if local { value_to_self } else { value_to_remote };
1765 let mut value_to_b = if local { value_to_remote } else { value_to_self };
1766 let (funding_pubkey_a, funding_pubkey_b) = if local {
1767 (self.get_holder_pubkeys().funding_pubkey, self.get_counterparty_pubkeys().funding_pubkey)
1769 (self.get_counterparty_pubkeys().funding_pubkey, self.get_holder_pubkeys().funding_pubkey)
1772 if value_to_a >= (broadcaster_dust_limit_satoshis as i64) {
1773 log_trace!(logger, " ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a);
1778 if value_to_b >= (broadcaster_dust_limit_satoshis as i64) {
1779 log_trace!(logger, " ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b);
1784 let num_nondust_htlcs = included_non_dust_htlcs.len();
1786 let channel_parameters =
1787 if local { self.channel_transaction_parameters.as_holder_broadcastable() }
1788 else { self.channel_transaction_parameters.as_counterparty_broadcastable() };
1789 let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number,
1796 &mut included_non_dust_htlcs,
1799 let mut htlcs_included = included_non_dust_htlcs;
1800 // The unwrap is safe, because all non-dust HTLCs have been assigned an output index
1801 htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
1802 htlcs_included.append(&mut included_dust_htlcs);
1804 // For the stats, trim the values in msats to 0 accordingly
1805 value_to_self_msat = if (value_to_self_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_self_msat };
1806 value_to_remote_msat = if (value_to_remote_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_remote_msat };
1814 local_balance_msat: value_to_self_msat as u64,
1815 remote_balance_msat: value_to_remote_msat as u64,
1816 inbound_htlc_preimages,
1817 outbound_htlc_preimages,
1822 /// Creates a set of keys for build_commitment_transaction to generate a transaction which our
1823 /// counterparty will sign (ie DO NOT send signatures over a transaction created by this to
1824 /// our counterparty!)
1825 /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
1826 /// TODO Some magic rust shit to compile-time check this?
1827 fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
1828 let per_commitment_point = self.holder_signer.as_ref().get_per_commitment_point(commitment_number, &self.secp_ctx);
1829 let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
1830 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1831 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1833 TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint)
1837 /// Creates a set of keys for build_commitment_transaction to generate a transaction which we
1838 /// will sign and send to our counterparty.
1839 /// If an Err is returned, it is a ChannelError::Close (for get_funding_created)
1840 fn build_remote_transaction_keys(&self) -> TxCreationKeys {
1841 //TODO: Ensure that the payment_key derived here ends up in the library users' wallet as we
1842 //may see payments to it!
1843 let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
1844 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1845 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1847 TxCreationKeys::derive_new(&self.secp_ctx, &self.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint)
1850 /// Gets the redeemscript for the funding transaction output (ie the funding transaction output
1851 /// pays to get_funding_redeemscript().to_v0_p2wsh()).
1852 /// Panics if called before accept_channel/InboundV1Channel::new
1853 pub fn get_funding_redeemscript(&self) -> ScriptBuf {
1854 make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
1857 fn counterparty_funding_pubkey(&self) -> &PublicKey {
1858 &self.get_counterparty_pubkeys().funding_pubkey
1861 pub fn get_feerate_sat_per_1000_weight(&self) -> u32 {
1865 pub fn get_dust_buffer_feerate(&self, outbound_feerate_update: Option<u32>) -> u32 {
1866 // When calculating our exposure to dust HTLCs, we assume that the channel feerate
1867 // may, at any point, increase by at least 10 sat/vB (i.e. 2530 sat/kWU) or 25%,
1868 // whichever is higher. This ensures that we aren't suddenly exposed to significantly
1869 // more dust balance if the feerate increases when we have several HTLCs pending
1870 // which are near the dust limit.
1871 let mut feerate_per_kw = self.feerate_per_kw;
1872 // If there's a pending update fee, use it to ensure we aren't under-estimating
1873 // potential feerate updates coming soon.
1874 if let Some((feerate, _)) = self.pending_update_fee {
1875 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1877 if let Some(feerate) = outbound_feerate_update {
1878 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1880 cmp::max(2530, feerate_per_kw * 1250 / 1000)
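// Worked example (illustrative numbers only): at a current feerate of 10_000 sat/kW the buffer
// feerate is max(2530, 10_000 * 1250 / 1000) = 12_500 sat/kW (a 25% bump); at 1_000 sat/kW the
// 2530 sat/kW floor dominates instead.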
1883 /// Get forwarding information for the counterparty.
1884 pub fn counterparty_forwarding_info(&self) -> Option<CounterpartyForwardingInfo> {
1885 self.counterparty_forwarding_info.clone()
1888 /// Returns a HTLCStats about inbound pending htlcs
1889 fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1891 let mut stats = HTLCStats {
1892 pending_htlcs: context.pending_inbound_htlcs.len() as u32,
1893 pending_htlcs_value_msat: 0,
1894 on_counterparty_tx_dust_exposure_msat: 0,
1895 on_holder_tx_dust_exposure_msat: 0,
1896 holding_cell_msat: 0,
1897 on_holder_tx_holding_cell_htlcs_count: 0,
1900 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1903 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1904 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1905 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1907 let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
1908 let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
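// Worked example (illustrative numbers only, assuming the BOLT #3 non-anchor weights of roughly
// 663 WU for HTLC-timeout and 703 WU for HTLC-success): with a 3_000 sat/kW dust buffer feerate,
// a 546 sat counterparty dust limit and a 354 sat holder dust limit, inbound HTLCs below
// 3_000 * 663 / 1000 + 546 = 2_535 sat count as dust on the counterparty's commitment, and those
// below 3_000 * 703 / 1000 + 354 = 2_463 sat count as dust on ours.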
1909 for ref htlc in context.pending_inbound_htlcs.iter() {
1910 stats.pending_htlcs_value_msat += htlc.amount_msat;
1911 if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
1912 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1914 if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
1915 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1921 /// Returns a HTLCStats about pending outbound htlcs, *including* pending adds in our holding cell.
1922 fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1924 let mut stats = HTLCStats {
1925 pending_htlcs: context.pending_outbound_htlcs.len() as u32,
1926 pending_htlcs_value_msat: 0,
1927 on_counterparty_tx_dust_exposure_msat: 0,
1928 on_holder_tx_dust_exposure_msat: 0,
1929 holding_cell_msat: 0,
1930 on_holder_tx_holding_cell_htlcs_count: 0,
1933 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1936 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1937 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1938 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1940 let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
1941 let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
1942 for ref htlc in context.pending_outbound_htlcs.iter() {
1943 stats.pending_htlcs_value_msat += htlc.amount_msat;
1944 if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
1945 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1947 if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
1948 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1952 for update in context.holding_cell_htlc_updates.iter() {
1953 if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
1954 stats.pending_htlcs += 1;
1955 stats.pending_htlcs_value_msat += amount_msat;
1956 stats.holding_cell_msat += amount_msat;
1957 if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
1958 stats.on_counterparty_tx_dust_exposure_msat += amount_msat;
1960 if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
1961 stats.on_holder_tx_dust_exposure_msat += amount_msat;
1963 stats.on_holder_tx_holding_cell_htlcs_count += 1;
1970 /// Get the available balances, see [`AvailableBalances`]'s fields for more info.
1971 /// Doesn't bother handling the
1972 /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
1973 /// corner case properly.
1974 pub fn get_available_balances<F: Deref>(&self, fee_estimator: &LowerBoundedFeeEstimator<F>)
1975 -> AvailableBalances
1976 where F::Target: FeeEstimator
1978 let context = &self;
1979 // Note that we have to handle overflow due to the above case.
1980 let inbound_stats = context.get_inbound_pending_htlc_stats(None);
1981 let outbound_stats = context.get_outbound_pending_htlc_stats(None);
1983 let mut balance_msat = context.value_to_self_msat;
1984 for ref htlc in context.pending_inbound_htlcs.iter() {
1985 if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
1986 balance_msat += htlc.amount_msat;
1989 balance_msat -= outbound_stats.pending_htlcs_value_msat;
1991 let outbound_capacity_msat = context.value_to_self_msat
1992 .saturating_sub(outbound_stats.pending_htlcs_value_msat)
1994 context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);
1996 let mut available_capacity_msat = outbound_capacity_msat;
1998 let anchor_outputs_value_msat = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1999 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
2003 if context.is_outbound() {
2004 // We should mind channel commit tx fee when computing how much of the available capacity
2005 // can be used in the next htlc. Mirrors the logic in send_htlc.
2007 // The fee depends on whether the amount we will be sending is above dust or not,
2008 // and the answer will in turn change the amount itself - making it a circular
2009 // dependency.
2010 // This complicates the computation around dust-values, up to the one-htlc-value.
2011 let mut real_dust_limit_timeout_sat = context.holder_dust_limit_satoshis;
2012 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2013 real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000;
2016 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
2017 let mut max_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
2018 let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered);
2019 let mut min_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_dust, Some(()));
2020 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2021 max_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
2022 min_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
2025 // We will first subtract the fee as if we were above-dust. Then, if the resulting
2026 // value ends up being below dust, we have this fee available again. In that case,
2027 // match the value to right-below-dust.
2028 let mut capacity_minus_commitment_fee_msat: i64 = available_capacity_msat as i64 -
2029 max_reserved_commit_tx_fee_msat as i64 - anchor_outputs_value_msat as i64;
2030 if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 {
2031 let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
2032 debug_assert!(one_htlc_difference_msat != 0);
2033 capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64;
2034 capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat);
2035 available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64;
2037 available_capacity_msat = capacity_minus_commitment_fee_msat as u64;
2040 // If the channel is inbound (i.e. counterparty pays the fee), we need to make sure
2041 // sending a new HTLC won't reduce their balance below our reserve threshold.
2042 let mut real_dust_limit_success_sat = context.counterparty_dust_limit_satoshis;
2043 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2044 real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000;
2047 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered);
2048 let max_reserved_commit_tx_fee_msat = context.next_remote_commit_tx_fee_msat(htlc_above_dust, None);
2050 let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000;
2051 let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat)
2052 .saturating_sub(inbound_stats.pending_htlcs_value_msat);
2054 if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat + anchor_outputs_value_msat {
2055 // If another HTLC's fee would reduce the remote's balance below the reserve limit
2056 // we've selected for them, we can only send dust HTLCs.
2057 available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1);
2061 let mut next_outbound_htlc_minimum_msat = context.counterparty_htlc_minimum_msat;
2063 // If we get close to our maximum dust exposure, we end up in a situation where we can send
2064 // between zero and the remaining dust exposure limit remaining OR above the dust limit.
2065 // Because we cannot express this as a simple min/max, we prefer to tell the user they can
2066 // send above the dust limit (as the router can always overpay to meet the dust limit).
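// For example (illustrative numbers only): with a 5_000_000 msat exposure limit and
// 4_900_000 msat of dust already pending, only ~100_000 msat of additional dust fits. If we can
// still send at or above the dust threshold we raise `next_outbound_htlc_minimum_msat` to that
// threshold; only when even our available capacity is below the threshold do we cap the limit to
// the remaining dust budget.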
2067 let mut remaining_msat_below_dust_exposure_limit = None;
2068 let mut dust_exposure_dust_limit_msat = 0;
2069 let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(fee_estimator);
2071 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2072 (context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
2074 let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64;
2075 (context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2076 context.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2078 let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
2079 if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
2080 remaining_msat_below_dust_exposure_limit =
2081 Some(max_dust_htlc_exposure_msat.saturating_sub(on_counterparty_dust_htlc_exposure_msat));
2082 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
2085 let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
2086 if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
2087 remaining_msat_below_dust_exposure_limit = Some(cmp::min(
2088 remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
2089 max_dust_htlc_exposure_msat.saturating_sub(on_holder_dust_htlc_exposure_msat)));
2090 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
2093 if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit {
2094 if available_capacity_msat < dust_exposure_dust_limit_msat {
2095 available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat);
2097 next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat);
2101 available_capacity_msat = cmp::min(available_capacity_msat,
2102 context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat);
2104 if outbound_stats.pending_htlcs + 1 > context.counterparty_max_accepted_htlcs as u32 {
2105 available_capacity_msat = 0;
2109 inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000
2110 - context.value_to_self_msat as i64
2111 - context.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
2112 - context.holder_selected_channel_reserve_satoshis as i64 * 1000,
2114 outbound_capacity_msat,
2115 next_outbound_htlc_limit_msat: available_capacity_msat,
2116 next_outbound_htlc_minimum_msat,
2121 pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
2122 let context = &self;
2123 (context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis)
2126 /// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the
2127 /// number of pending HTLCs that are on track to be in our next commitment tx.
2129 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
2130 /// `fee_spike_buffer_htlc` is `Some`.
2132 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
2133 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
2135 /// Dust HTLCs are excluded.
2136 fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
2137 let context = &self;
2138 assert!(context.is_outbound());
2140 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2143 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2144 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2146 let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
2147 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
2149 let mut addl_htlcs = 0;
2150 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
2152 HTLCInitiator::LocalOffered => {
2153 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
2157 HTLCInitiator::RemoteOffered => {
2158 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
2164 let mut included_htlcs = 0;
2165 for ref htlc in context.pending_inbound_htlcs.iter() {
2166 if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
2169 // We include LocalRemoved HTLCs here because we may still need to broadcast a commitment
2170 // transaction including this HTLC if it times out before they RAA.
2171 included_htlcs += 1;
2174 for ref htlc in context.pending_outbound_htlcs.iter() {
2175 if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
2179 OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1,
2180 OutboundHTLCState::Committed => included_htlcs += 1,
2181 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
2182 // We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment
2183 // transaction won't be generated until they send us their next RAA, which will mean
2184 // dropping any HTLCs in this state.
2189 for htlc in context.holding_cell_htlc_updates.iter() {
2191 &HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => {
2192 if amount_msat / 1000 < real_dust_limit_timeout_sat {
2197 _ => {}, // Don't include claims/fails that are awaiting ack, because once we get the
2198 // ack we're guaranteed to never include them in commitment txs anymore.
2202 let num_htlcs = included_htlcs + addl_htlcs;
2203 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
2204 #[cfg(any(test, fuzzing))]
2207 if fee_spike_buffer_htlc.is_some() {
2208 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
2210 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len()
2211 + context.holding_cell_htlc_updates.len();
2212 let commitment_tx_info = CommitmentTxInfoCached {
2214 total_pending_htlcs,
2215 next_holder_htlc_id: match htlc.origin {
2216 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
2217 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
2219 next_counterparty_htlc_id: match htlc.origin {
2220 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
2221 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
2223 feerate: context.feerate_per_kw,
2225 *context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
2230 /// Get the commitment tx fee for the remote's next commitment transaction based on the number of
2231 /// pending HTLCs that are on track to be in their next commitment tx
2233 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
2234 /// `fee_spike_buffer_htlc` is `Some`.
2236 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
2237 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
2239 /// Dust HTLCs are excluded.
2240 fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
2241 let context = &self;
2242 assert!(!context.is_outbound());
2244 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2247 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2248 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2250 let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
2251 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
2253 let mut addl_htlcs = 0;
2254 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
2256 HTLCInitiator::LocalOffered => {
2257 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
2261 HTLCInitiator::RemoteOffered => {
2262 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
2268 // When calculating the set of HTLCs which will be included in their next commitment_signed, all
2269 // non-dust inbound HTLCs are included (as all states imply it will be included) and only
2270 // committed outbound HTLCs, see below.
2271 let mut included_htlcs = 0;
2272 for ref htlc in context.pending_inbound_htlcs.iter() {
2273 if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat {
2276 included_htlcs += 1;
2279 for ref htlc in context.pending_outbound_htlcs.iter() {
2280 if htlc.amount_msat / 1000 <= real_dust_limit_success_sat {
2283 // We only include outbound HTLCs if it will not be included in their next commitment_signed,
2284 // i.e. if they've responded to us with an RAA after announcement.
2286 OutboundHTLCState::Committed => included_htlcs += 1,
2287 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
2288 OutboundHTLCState::LocalAnnounced { .. } => included_htlcs += 1,
2293 let num_htlcs = included_htlcs + addl_htlcs;
2294 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
2295 #[cfg(any(test, fuzzing))]
2298 if fee_spike_buffer_htlc.is_some() {
2299 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
2301 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len();
2302 let commitment_tx_info = CommitmentTxInfoCached {
2304 total_pending_htlcs,
2305 next_holder_htlc_id: match htlc.origin {
2306 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
2307 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
2309 next_counterparty_htlc_id: match htlc.origin {
2310 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
2311 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
2313 feerate: context.feerate_per_kw,
2315 *context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
2320 fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O> where F: Fn() -> Option<O> {
2321 match self.channel_state {
2322 ChannelState::FundingNegotiated => f(),
2323 ChannelState::AwaitingChannelReady(flags) =>
2324 if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) ||
2325 flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into())
2335 /// Returns the transaction if there is a pending funding transaction that is yet to be
2336 /// broadcast.
2337 pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
2338 self.if_unbroadcasted_funding(|| self.funding_transaction.clone())
2341 /// Returns the transaction ID if there is a pending funding transaction that is yet to be
2342 /// broadcast.
2343 pub fn unbroadcasted_funding_txid(&self) -> Option<Txid> {
2344 self.if_unbroadcasted_funding(||
2345 self.channel_transaction_parameters.funding_outpoint.map(|txo| txo.txid)
2349 /// Returns whether the channel is funded in a batch.
2350 pub fn is_batch_funding(&self) -> bool {
2351 self.is_batch_funding.is_some()
2354 /// Returns the transaction ID if there is a pending batch funding transaction that is yet to be
2355 /// broadcast.
2356 pub fn unbroadcasted_batch_funding_txid(&self) -> Option<Txid> {
2357 self.unbroadcasted_funding_txid().filter(|_| self.is_batch_funding())
2360 /// Gets the latest commitment transaction and any dependent transactions for relay (forcing
2361 /// shutdown of this channel - no more calls into this Channel may be made afterwards except
2362 /// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
2363 /// Also returns the list of payment_hashes for channels which we can safely fail backwards
2364 /// immediately (others we will have to allow to time out).
2365 pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult {
2366 // Note that we MUST only generate a monitor update that indicates force-closure - we're
2367 // called during initialization prior to the chain_monitor in the encompassing ChannelManager
2368 // being fully configured in some cases. Thus, it's likely any monitor events we generate will
2369 // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
2370 assert!(!matches!(self.channel_state, ChannelState::ShutdownComplete));
2372 // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
2373 // return them to fail the payment.
2374 let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
2375 let counterparty_node_id = self.get_counterparty_node_id();
2376 for htlc_update in self.holding_cell_htlc_updates.drain(..) {
2378 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
2379 dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
2384 let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
2385 // If we haven't yet exchanged funding signatures (ie channel_state < AwaitingChannelReady),
2386 // returning a channel monitor update here would imply a channel monitor update before
2387 // we even registered the channel monitor to begin with, which is invalid.
2388 // Thus, if we aren't actually at a point where we could conceivably broadcast the
2389 // funding transaction, don't return a funding txo (which prevents providing the
2390 // monitor update to the user, even if we return one).
2391 // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
2392 let generate_monitor_update = match self.channel_state {
2393 ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)|ChannelState::ShutdownComplete => true,
2396 if generate_monitor_update {
2397 self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
2398 Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
2399 update_id: self.latest_monitor_update_id,
2400 counterparty_node_id: Some(self.counterparty_node_id),
2401 updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
2405 let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();
2406 let unbroadcasted_funding_tx = self.unbroadcasted_funding();
2408 self.channel_state = ChannelState::ShutdownComplete;
2409 self.update_time_counter += 1;
2412 dropped_outbound_htlcs,
2413 unbroadcasted_batch_funding_txid,
2414 channel_id: self.channel_id,
2415 counterparty_node_id: self.counterparty_node_id,
2416 unbroadcasted_funding_tx,
2420 /// Only allowed after [`Self::channel_transaction_parameters`] is set.
2421 fn get_funding_signed_msg<L: Deref>(&mut self, logger: &L) -> (CommitmentTransaction, Option<msgs::FundingSigned>) where L::Target: Logger {
2422 let counterparty_keys = self.build_remote_transaction_keys();
2423 let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number + 1, &counterparty_keys, false, false, logger).tx;
2425 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
2426 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
2427 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
2428 &self.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
2430 match &self.holder_signer {
2431 // TODO (arik): move match into calling method for Taproot
2432 ChannelSignerType::Ecdsa(ecdsa) => {
2433 let funding_signed = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.secp_ctx)
2434 .map(|(signature, _)| msgs::FundingSigned {
2435 channel_id: self.channel_id(),
2438 partial_signature_with_nonce: None,
2442 if funding_signed.is_none() {
2443 #[cfg(not(async_signing))] {
2444 panic!("Failed to get signature for funding_signed");
2446 #[cfg(async_signing)] {
2447 log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
2448 self.signer_pending_funding = true;
2450 } else if self.signer_pending_funding {
2451 log_trace!(logger, "Counterparty commitment signature available for funding_signed message; clearing signer_pending_funding");
2452 self.signer_pending_funding = false;
2455 // We sign the "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
2456 (counterparty_initial_commitment_tx, funding_signed)
2458 // TODO (taproot|arik)
2465 // Internal utility functions for channels
2467 /// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
2468 /// `channel_value_satoshis` in msat, set through
2469 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
2471 /// The effective percentage is lower bounded by 1% and upper bounded by 100%.
2473 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
2474 fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
2475 let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
2477 } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
2480 config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
2482 channel_value_satoshis * 10 * configured_percent
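// Worked example (illustrative numbers only): for a 1_000_000 sat channel with the percentage
// configured to 10, this returns 1_000_000 * 10 * 10 = 100_000_000 msat, i.e. 10% of the channel
// value expressed in msat; configured values outside 1..=100 are clamped first.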
2485 /// Returns a minimum channel reserve value the remote needs to maintain,
2486 /// required by us according to the configured or default
2487 /// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
2489 /// Guaranteed to return a value no larger than channel_value_satoshis
2491 /// This is used both for outbound and inbound channels and has lower bound
2492 /// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
2493 pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
2494 let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
2495 cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
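// Worked example (illustrative numbers only): for a 1_000_000 sat channel with
// `their_channel_reserve_proportional_millionths` set to 10_000 (1%), the calculated reserve is
// 1_000_000 * 10_000 / 1_000_000 = 10_000 sat. For a tiny channel where that proportion falls
// below `MIN_THEIR_CHAN_RESERVE_SATOSHIS` the reserve is raised to that minimum, and it is never
// allowed to exceed the full channel value.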
2498 /// This is for legacy reasons, present for forward-compatibility.
2499 /// LDK versions older than 0.0.104 don't know how to read/handle values other than default
2500 /// from storage. Hence, we use this function to not persist default values of
2501 /// `holder_selected_channel_reserve_satoshis` for channels into storage.
2502 pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
2503 let (q, _) = channel_value_satoshis.overflowing_div(100);
2504 cmp::min(channel_value_satoshis, cmp::max(q, 1000))
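// Worked example (illustrative numbers only): a 1_000_000 sat channel yields q = 10_000 and thus
// a 10_000 sat reserve, while a 50_000 sat channel yields q = 500, which is raised to the
// 1_000 sat floor (and the result is always capped at the channel value itself).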
2507 // Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
2508 // Note that num_htlcs should not include dust HTLCs.
2510 fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2511 feerate_per_kw as u64 * (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
2514 // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs.
2515 // Note that num_htlcs should not include dust HTLCs.
2516 pub(crate) fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2517 // Note that we need to divide before multiplying to round properly,
2518 // since the lowest denomination of bitcoin on-chain is the satoshi.
2519 (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
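// A minimal, illustrative sketch exercising the two fee helpers above (not part of the real test
// suite). It assumes the usual non-anchor commitment weights (724 WU base plus 172 WU per
// non-dust HTLC), so at 2_500 sat/kW a commitment with two non-dust HTLCs weighs 1_068 WU and
// costs 2_500 * 1_068 / 1000 = 2_670 sat, i.e. 2_670_000 msat.
#[cfg(test)]
mod commit_tx_fee_example {
	use super::{commit_tx_fee_msat, commit_tx_fee_sat};
	use crate::ln::features::ChannelTypeFeatures;

	#[test]
	fn commit_tx_fee_matches_worked_example() {
		// Hypothetical example values: 2_500 sat/kW with two non-dust HTLCs on a
		// non-anchor (static-remote-key-only) channel.
		let channel_type = ChannelTypeFeatures::only_static_remote_key();
		assert_eq!(commit_tx_fee_sat(2_500, 2, &channel_type), 2_670);
		// The msat variant rounds down to a whole satoshi before scaling back up to msat.
		assert_eq!(commit_tx_fee_msat(2_500, 2, &channel_type), 2_670_000);
	}
}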
2522 // Holder designates channel data owned for the benefit of the user client.
2523 // Counterparty designates channel data owned by the other channel participant entity.
2524 pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
2525 pub context: ChannelContext<SP>,
2528 #[cfg(any(test, fuzzing))]
2529 struct CommitmentTxInfoCached {
2531 total_pending_htlcs: usize,
2532 next_holder_htlc_id: u64,
2533 next_counterparty_htlc_id: u64,
2537 /// Contents of a wire message that fails an HTLC backwards. Useful for [`Channel::fail_htlc`] to
2538 /// fail with either [`msgs::UpdateFailMalformedHTLC`] or [`msgs::UpdateFailHTLC`] as needed.
2539 trait FailHTLCContents {
2540 type Message: FailHTLCMessageName;
2541 fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message;
2542 fn to_inbound_htlc_state(self) -> InboundHTLCState;
2543 fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK;
2545 impl FailHTLCContents for msgs::OnionErrorPacket {
2546 type Message = msgs::UpdateFailHTLC;
2547 fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
2548 msgs::UpdateFailHTLC { htlc_id, channel_id, reason: self }
2550 fn to_inbound_htlc_state(self) -> InboundHTLCState {
2551 InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(self))
2553 fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
2554 HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet: self }
2557 impl FailHTLCContents for (u16, [u8; 32]) {
2558 type Message = msgs::UpdateFailMalformedHTLC; // (failure_code, sha256_of_onion)
2559 fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
2560 msgs::UpdateFailMalformedHTLC {
2563 failure_code: self.0,
2564 sha256_of_onion: self.1
2567 fn to_inbound_htlc_state(self) -> InboundHTLCState {
2568 InboundHTLCState::LocalRemoved(
2569 InboundHTLCRemovalReason::FailMalformed((self.1, self.0))
2572 fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
2573 HTLCUpdateAwaitingACK::FailMalformedHTLC {
2575 failure_code: self.0,
2576 sha256_of_onion: self.1
2581 trait FailHTLCMessageName {
2582 fn name() -> &'static str;
2584 impl FailHTLCMessageName for msgs::UpdateFailHTLC {
2585 fn name() -> &'static str {
2589 impl FailHTLCMessageName for msgs::UpdateFailMalformedHTLC {
2590 fn name() -> &'static str {
2591 "update_fail_malformed_htlc"
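// Minimal illustrative sketch (hypothetical, not part of the original file): the
// `(failure_code, sha256_of_onion)` impl produces an `update_fail_malformed_htlc` wire
// message, while `msgs::OnionErrorPacket` produces a plain `update_fail_htlc`;
// `Channel::fail_htlc` below is generic over both via `FailHTLCContents`.
#[cfg(test)]
#[allow(dead_code)]
fn fail_htlc_contents_dispatch_example() {
	let malformed: (u16, [u8; 32]) = (0x4000 | 1, [0; 32]);
	let msg = malformed.to_message(0, ChannelId::from_bytes([42; 32]));
	assert_eq!(msg.failure_code, 0x4000 | 1);
	assert_eq!(<msgs::UpdateFailMalformedHTLC as FailHTLCMessageName>::name(), "update_fail_malformed_htlc");
}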
2595 impl<SP: Deref> Channel<SP> where
2596 SP::Target: SignerProvider,
2597 <SP::Target as SignerProvider>::EcdsaSigner: WriteableEcdsaChannelSigner
2599 fn check_remote_fee<F: Deref, L: Deref>(
2600 channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator<F>,
2601 feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L
2602 ) -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
2604 let lower_limit_conf_target = if channel_type.supports_anchors_zero_fee_htlc_tx() {
2605 ConfirmationTarget::MinAllowedAnchorChannelRemoteFee
2607 ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee
2609 let lower_limit = fee_estimator.bounded_sat_per_1000_weight(lower_limit_conf_target);
2610 if feerate_per_kw < lower_limit {
2611 if let Some(cur_feerate) = cur_feerate_per_kw {
2612 if feerate_per_kw > cur_feerate {
2614 "Accepting feerate that may prevent us from closing this channel because it's higher than what we have now. Had {} s/kW, now {} s/kW.",
2615 cur_feerate, feerate_per_kw);
2619 return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit)));
2625 fn get_closing_scriptpubkey(&self) -> ScriptBuf {
2626 // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
2627 // is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
2628 // outside of those situations will fail.
2629 self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
2633 fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
2638 1 + // script length (0)
2642 )*4 + // * 4 for non-witness parts
2643 2 + // witness marker and flag
2644 1 + // witness element count
2645 4 + // 4 element lengths (2 sigs, multisig dummy, and witness script)
2646 self.context.get_funding_redeemscript().len() as u64 + // funding witness script
2647 2*(1 + 71); // two signatures + sighash type flags
2648 if let Some(spk) = a_scriptpubkey {
2649 ret += ((8+1) + // output values and script length
2650 spk.len() as u64) * 4; // scriptpubkey and witness multiplier
2652 if let Some(spk) = b_scriptpubkey {
2653 ret += ((8+1) + // output values and script length
2654 spk.len() as u64) * 4; // scriptpubkey and witness multiplier
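// Illustrative arithmetic (not part of the original source): each included output contributes
// (8 value bytes + 1 script-length byte + the scriptpubkey itself) * 4 weight. A 22-byte
// P2WPKH scriptpubkey therefore adds (9 + 22) * 4 = 124 weight units, and a 34-byte P2WSH
// scriptpubkey adds (9 + 34) * 4 = 172 weight units.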
2660 fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
2661 assert!(self.context.pending_inbound_htlcs.is_empty());
2662 assert!(self.context.pending_outbound_htlcs.is_empty());
2663 assert!(self.context.pending_update_fee.is_none());
2665 let mut total_fee_satoshis = proposed_total_fee_satoshis;
2666 let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 };
2667 let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 };
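// Illustrative arithmetic (hypothetical numbers): with value_to_self_msat = 600_000_000 on a
// 1_000_000 sat channel and a proposed fee of 2_000 sats paid by the outbound funder, the
// holder output is 600_000 - 2_000 = 598_000 sats and the counterparty output is 400_000 sats;
// below, either output is dropped entirely if it does not exceed holder_dust_limit_satoshis.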
2669 if value_to_holder < 0 {
2670 assert!(self.context.is_outbound());
2671 total_fee_satoshis += (-value_to_holder) as u64;
2672 } else if value_to_counterparty < 0 {
2673 assert!(!self.context.is_outbound());
2674 total_fee_satoshis += (-value_to_counterparty) as u64;
2677 if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis {
2678 value_to_counterparty = 0;
2681 if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis {
2682 value_to_holder = 0;
2685 assert!(self.context.shutdown_scriptpubkey.is_some());
2686 let holder_shutdown_script = self.get_closing_scriptpubkey();
2687 let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap();
2688 let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();
2690 let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
2691 (closing_transaction, total_fee_satoshis)
2694 fn funding_outpoint(&self) -> OutPoint {
2695 self.context.channel_transaction_parameters.funding_outpoint.unwrap()
2698 /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
2701 /// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
2702 /// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
2704 /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is
2706 pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
2707 (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
2708 where L::Target: Logger {
2709 // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
2710 // (see equivalent if condition there).
2711 assert!(self.context.channel_state.should_force_holding_cell());
2712 let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
2713 let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
2714 self.context.latest_monitor_update_id = mon_update_id;
2715 if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
2716 assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
2720 fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
2721 // Either ChannelReady got set (which means it won't be unset) or there is no way any
2722 // caller thought we could have something claimed (since we wouldn't have accepted an
2723 // incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
2725 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
2726 panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
2729 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
2730 // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
2731 // these, but for now we just have to treat them as normal.
2733 let mut pending_idx = core::usize::MAX;
2734 let mut htlc_value_msat = 0;
2735 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
2736 if htlc.htlc_id == htlc_id_arg {
2737 debug_assert_eq!(htlc.payment_hash, PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).to_byte_array()));
2738 log_debug!(logger, "Claiming inbound HTLC id {} with payment hash {} with preimage {}",
2739 htlc.htlc_id, htlc.payment_hash, payment_preimage_arg);
2741 InboundHTLCState::Committed => {},
2742 InboundHTLCState::LocalRemoved(ref reason) => {
2743 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
2745 log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, &self.context.channel_id());
2746 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
2748 return UpdateFulfillFetch::DuplicateClaim {};
2751 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2752 // Don't return in release mode here so that we can update channel_monitor
2756 htlc_value_msat = htlc.amount_msat;
2760 if pending_idx == core::usize::MAX {
2761 #[cfg(any(test, fuzzing))]
2762 // If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and that
2763 // this is simply a duplicate claim, rather than a previously-failed HTLC for which we lost funds.
2764 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2765 return UpdateFulfillFetch::DuplicateClaim {};
2768 // Now update local state:
2770 // We have to put the payment_preimage in the channel_monitor right away here to ensure we
2771 // can claim it even if the channel hits the chain before we see their next commitment.
2772 self.context.latest_monitor_update_id += 1;
2773 let monitor_update = ChannelMonitorUpdate {
2774 update_id: self.context.latest_monitor_update_id,
2775 counterparty_node_id: Some(self.context.counterparty_node_id),
2776 updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
2777 payment_preimage: payment_preimage_arg.clone(),
2781 if self.context.channel_state.should_force_holding_cell() {
2782 // Note that this condition is the same as the assertion in
2783 // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
2784 // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
2785 // do not get into this branch.
2786 for pending_update in self.context.holding_cell_htlc_updates.iter() {
2787 match pending_update {
2788 &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2789 if htlc_id_arg == htlc_id {
2790 // Make sure we don't leave latest_monitor_update_id incremented here:
2791 self.context.latest_monitor_update_id -= 1;
2792 #[cfg(any(test, fuzzing))]
2793 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2794 return UpdateFulfillFetch::DuplicateClaim {};
2797 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
2798 &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
2800 if htlc_id_arg == htlc_id {
2801 log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id());
2802 // TODO: We may actually be able to switch to a fulfill here, though its
2803 // rare enough it may not be worth the complexity burden.
2804 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
2805 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2811 log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state.to_u32());
2812 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
2813 payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
2815 #[cfg(any(test, fuzzing))]
2816 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
2817 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2819 #[cfg(any(test, fuzzing))]
2820 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
2823 let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
2824 if let InboundHTLCState::Committed = htlc.state {
2826 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2827 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2829 log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, &self.context.channel_id);
2830 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
2833 UpdateFulfillFetch::NewClaim {
2836 msg: Some(msgs::UpdateFulfillHTLC {
2837 channel_id: self.context.channel_id(),
2838 htlc_id: htlc_id_arg,
2839 payment_preimage: payment_preimage_arg,
2844 pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
2845 let release_cs_monitor = self.context.blocked_monitor_updates.is_empty();
2846 match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
2847 UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
2848 // Even if we aren't supposed to let new monitor updates with commitment state
2849 // updates run, we still need to push the preimage ChannelMonitorUpdateStep no
2850 // matter what. Sadly, to push a new monitor update which flies before others
2851 // already queued, we have to insert it into the pending queue and update the
2852 // update_ids of all the following monitors.
2853 if release_cs_monitor && msg.is_some() {
2854 let mut additional_update = self.build_commitment_no_status_check(logger);
2855 // build_commitment_no_status_check may bump latest_monitor_id but we want them
2856 // to be strictly increasing by one, so decrement it here.
2857 self.context.latest_monitor_update_id = monitor_update.update_id;
2858 monitor_update.updates.append(&mut additional_update.updates);
2860 let new_mon_id = self.context.blocked_monitor_updates.get(0)
2861 .map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
2862 monitor_update.update_id = new_mon_id;
2863 for held_update in self.context.blocked_monitor_updates.iter_mut() {
2864 held_update.update.update_id += 1;
2867 debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
2868 let update = self.build_commitment_no_status_check(logger);
2869 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
2875 self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
2876 UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, }
2878 UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
2882 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2883 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2884 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2885 /// before we fail backwards.
2887 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2888 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2889 /// [`ChannelError::Ignore`].
2890 pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
2891 -> Result<(), ChannelError> where L::Target: Logger {
2892 self.fail_htlc(htlc_id_arg, err_packet, true, logger)
2893 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
2896 /// Used for failing back with [`msgs::UpdateFailMalformedHTLC`]. For now, this is used when we
2897 /// want to fail blinded HTLCs where we are not the intro node.
2899 /// See [`Self::queue_fail_htlc`] for more info.
2900 pub fn queue_fail_malformed_htlc<L: Deref>(
2901 &mut self, htlc_id_arg: u64, failure_code: u16, sha256_of_onion: [u8; 32], logger: &L
2902 ) -> Result<(), ChannelError> where L::Target: Logger {
2903 self.fail_htlc(htlc_id_arg, (failure_code, sha256_of_onion), true, logger)
2904 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
2907 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2908 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2909 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2910 /// before we fail backwards.
2912 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2913 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2914 /// [`ChannelError::Ignore`].
2915 fn fail_htlc<L: Deref, E: FailHTLCContents + Clone>(
2916 &mut self, htlc_id_arg: u64, err_packet: E, mut force_holding_cell: bool,
2918 ) -> Result<Option<E::Message>, ChannelError> where L::Target: Logger {
2919 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
2920 panic!("Was asked to fail an HTLC when channel was not in an operational state");
2923 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
2924 // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
2925 // these, but for now we just have to treat them as normal.
2927 let mut pending_idx = core::usize::MAX;
2928 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
2929 if htlc.htlc_id == htlc_id_arg {
2931 InboundHTLCState::Committed => {},
2932 InboundHTLCState::LocalRemoved(ref reason) => {
2933 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
2935 debug_assert!(false, "Tried to fail an HTLC that was already failed");
2940 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2941 return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
2947 if pending_idx == core::usize::MAX {
2948 #[cfg(any(test, fuzzing))]
2949 // If we failed to find an HTLC to fail, make sure it was previously fulfilled and that this
2950 // is simply a duplicate fail, rather than a previously-failed HTLC, which would mean we failed it back too early.
2951 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2955 if self.context.channel_state.should_force_holding_cell() {
2956 debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
2957 force_holding_cell = true;
2960 // Now update local state:
2961 if force_holding_cell {
2962 for pending_update in self.context.holding_cell_htlc_updates.iter() {
2963 match pending_update {
2964 &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2965 if htlc_id_arg == htlc_id {
2966 #[cfg(any(test, fuzzing))]
2967 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2971 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
2972 &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
2974 if htlc_id_arg == htlc_id {
2975 debug_assert!(false, "Tried to fail an HTLC that was already failed");
2976 return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
2982 log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id());
2983 self.context.holding_cell_htlc_updates.push(err_packet.to_htlc_update_awaiting_ack(htlc_id_arg));
2987 log_trace!(logger, "Failing HTLC ID {} back with {} message in channel {}.", htlc_id_arg,
2988 E::Message::name(), &self.context.channel_id());
2990 let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
2991 htlc.state = err_packet.clone().to_inbound_htlc_state();
2994 Ok(Some(err_packet.to_message(htlc_id_arg, self.context.channel_id())))
2997 // Message handlers:
2998 /// Updates the state of the channel to indicate that all channels in the batch have received
2999 /// funding_signed and persisted their monitors.
3000 /// The funding transaction is consequently allowed to be broadcast, and the channel can be
3001 /// treated as a non-batch channel going forward.
3002 pub fn set_batch_ready(&mut self) {
3003 self.context.is_batch_funding = None;
3004 self.context.channel_state.clear_waiting_for_batch();
3007 /// Unsets the existing funding information.
3009 /// This must only be used if the channel has not yet completed funding and has not been used.
3011 /// Further, the channel must be immediately shut down after this with a call to
3012 /// [`ChannelContext::force_shutdown`].
3013 pub fn unset_funding_info(&mut self, temporary_channel_id: ChannelId) {
3014 debug_assert!(matches!(
3015 self.context.channel_state, ChannelState::AwaitingChannelReady(_)
3017 self.context.channel_transaction_parameters.funding_outpoint = None;
3018 self.context.channel_id = temporary_channel_id;
3021 /// Handles a channel_ready message from our peer. If we've already sent our channel_ready
3022 /// and the channel is now usable (and public), this may generate an announcement_signatures to
3024 pub fn channel_ready<NS: Deref, L: Deref>(
3025 &mut self, msg: &msgs::ChannelReady, node_signer: &NS, chain_hash: ChainHash,
3026 user_config: &UserConfig, best_block: &BestBlock, logger: &L
3027 ) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
3029 NS::Target: NodeSigner,
3032 if self.context.channel_state.is_peer_disconnected() {
3033 self.context.workaround_lnd_bug_4006 = Some(msg.clone());
3034 return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
3037 if let Some(scid_alias) = msg.short_channel_id_alias {
3038 if Some(scid_alias) != self.context.short_channel_id {
3039 // The scid alias provided can be used to route payments *from* our counterparty,
3040 // i.e. can be used for inbound payments and provided in invoices, but is not used
3041 // when routing outbound payments.
3042 self.context.latest_inbound_scid_alias = Some(scid_alias);
3046 // Our channel_ready shouldn't have been sent if we are waiting for other channels in the
3047 // batch, but we can receive channel_ready messages.
3048 let mut check_reconnection = false;
3049 match &self.context.channel_state {
3050 ChannelState::AwaitingChannelReady(flags) => {
3051 let flags = *flags & !FundedStateFlags::ALL;
3052 debug_assert!(!flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY) || !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
3053 if flags & !AwaitingChannelReadyFlags::WAITING_FOR_BATCH == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY {
3054 // If we reconnected before sending our `channel_ready` they may still resend theirs.
3055 check_reconnection = true;
3056 } else if (flags & !AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty() {
3057 self.context.channel_state.set_their_channel_ready();
3058 } else if flags == AwaitingChannelReadyFlags::OUR_CHANNEL_READY {
3059 self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
3060 self.context.update_time_counter += 1;
3062 // We're in `WAITING_FOR_BATCH`, so we should wait until we're ready.
3063 debug_assert!(flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
3066 // If we reconnected before sending our `channel_ready` they may still resend theirs.
3067 ChannelState::ChannelReady(_) => check_reconnection = true,
3068 _ => return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned())),
3070 if check_reconnection {
3071 // They probably disconnected/reconnected and re-sent the channel_ready, which is
3072 // required, or they're sending a fresh SCID alias.
3073 let expected_point =
3074 if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
3075 // If they haven't ever sent an updated point, the point they send should match
3077 self.context.counterparty_cur_commitment_point
3078 } else if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
3079 // If we've advanced the commitment number once, the second commitment point is
3080 // at `counterparty_prev_commitment_point`, which is not yet revoked.
3081 debug_assert!(self.context.counterparty_prev_commitment_point.is_some());
3082 self.context.counterparty_prev_commitment_point
3084 // If they have sent updated points, channel_ready is always supposed to match
3085 // their "first" point, which we re-derive here.
3086 Some(PublicKey::from_secret_key(&self.context.secp_ctx, &SecretKey::from_slice(
3087 &self.context.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
3088 ).expect("We already advanced, so previous secret keys should have been validated already")))
3090 if expected_point != Some(msg.next_per_commitment_point) {
3091 return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
3096 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
3097 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
3099 log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id());
3101 Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger))
3104 pub fn update_add_htlc<F, FE: Deref, L: Deref>(
3105 &mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
3106 create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
3107 ) -> Result<(), ChannelError>
3108 where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
3109 FE::Target: FeeEstimator, L::Target: Logger,
3111 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3112 return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
3114 // We can't accept HTLCs sent after we've sent a shutdown.
3115 if self.context.channel_state.is_local_shutdown_sent() {
3116 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
3118 // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
3119 if self.context.channel_state.is_remote_shutdown_sent() {
3120 return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
3122 if self.context.channel_state.is_peer_disconnected() {
3123 return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
3125 if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
3126 return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
3128 if msg.amount_msat == 0 {
3129 return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
3131 if msg.amount_msat < self.context.holder_htlc_minimum_msat {
3132 return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
3135 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
3136 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
3137 if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
3138 return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
3140 if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
3141 return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
3144 // Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
3145 // the reserve_satoshis we told them to always have as direct payment so that they lose
3146 // something if we punish them for broadcasting an old state).
3147 // Note that we don't really care about having a small/no to_remote output in our local
3148 // commitment transactions, as the purpose of the channel reserve is to ensure we can
3149 // punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
3150 // present in the next commitment transaction we send them (at least for fulfilled ones,
3151 // failed ones won't modify value_to_self).
3152 // Note that we will send HTLCs which another instance of rust-lightning would think
3153 // violate the reserve value if we do not do this (as we forget inbound HTLCs from the
3154 // Channel state once they will not be present in the next received commitment
3156 let mut removed_outbound_total_msat = 0;
3157 for ref htlc in self.context.pending_outbound_htlcs.iter() {
3158 if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
3159 removed_outbound_total_msat += htlc.amount_msat;
3160 } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
3161 removed_outbound_total_msat += htlc.amount_msat;
3165 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
3166 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3169 let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
3170 (dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
3171 dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
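// Illustrative arithmetic (hypothetical numbers, assuming the pre-anchor HTLC transaction
// weights of 663 for timeouts and 703 for successes): at a dust buffer feerate of 2_530 sat/kW
// the per-HTLC thresholds are 2_530 * 663 / 1000 = 1_677 sats (timeout) and
// 2_530 * 703 / 1000 = 1_778 sats (success), before adding the respective dust limits below.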
3173 let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
3174 if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
3175 let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
3176 if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
3177 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
3178 on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
3179 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
3183 let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
3184 if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
3185 let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
3186 if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
3187 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
3188 on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
3189 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
3193 let pending_value_to_self_msat =
3194 self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
3195 let pending_remote_value_msat =
3196 self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
3197 if pending_remote_value_msat < msg.amount_msat {
3198 return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
3201 // Check that the remote can afford to pay for this HTLC on-chain at the current
3202 // feerate_per_kw, while maintaining their channel reserve (as required by the spec).
3204 let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else {
3205 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3206 self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
3208 let anchor_outputs_value_msat = if !self.context.is_outbound() && self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3209 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
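// For illustration (not part of the original source): with ANCHOR_OUTPUT_VALUE_SATOSHI at its
// current value of 330 sats, the two anchor outputs the non-funder must remain able to cover
// amount to 330 * 2 * 1000 = 660_000 msat.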
3213 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(anchor_outputs_value_msat) < remote_commit_tx_fee_msat {
3214 return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
3216 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(remote_commit_tx_fee_msat).saturating_sub(anchor_outputs_value_msat) < self.context.holder_selected_channel_reserve_satoshis * 1000 {
3217 return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
3221 let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3222 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
3226 if !self.context.is_outbound() {
3227 // `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
3228 // the spec because the fee spike buffer requirement doesn't exist on the receiver's
3229 // side, only on the sender's. Note that with anchor outputs we are no longer as
3230 // sensitive to fee spikes, so we need to account for them less.
3231 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3232 let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
3233 if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3234 remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
3236 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
3237 // Note that if the pending_forward_status is not updated here, then it's because we're already failing
3238 // the HTLC, i.e. its status is already set to failing.
3239 log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
3240 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
3243 // Check that they won't violate our local required channel reserve by adding this HTLC.
3244 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3245 let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
3246 if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat + anchor_outputs_value_msat {
3247 return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
3250 if self.context.next_counterparty_htlc_id != msg.htlc_id {
3251 return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
3253 if msg.cltv_expiry >= 500000000 {
3254 return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
3257 if self.context.channel_state.is_local_shutdown_sent() {
3258 if let PendingHTLCStatus::Forward(_) = pending_forward_status {
3259 panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
3263 // Now update local state:
3264 self.context.next_counterparty_htlc_id += 1;
3265 self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
3266 htlc_id: msg.htlc_id,
3267 amount_msat: msg.amount_msat,
3268 payment_hash: msg.payment_hash,
3269 cltv_expiry: msg.cltv_expiry,
3270 state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
3275 /// Marks an outbound HTLC which we have received update_fail/fulfill/malformed
3277 fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
3278 assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage");
3279 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3280 if htlc.htlc_id == htlc_id {
3281 let outcome = match check_preimage {
3282 None => fail_reason.into(),
3283 Some(payment_preimage) => {
3284 let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array());
3285 if payment_hash != htlc.payment_hash {
3286 return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
3288 OutboundHTLCOutcome::Success(Some(payment_preimage))
3292 OutboundHTLCState::LocalAnnounced(_) =>
3293 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
3294 OutboundHTLCState::Committed => {
3295 htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
3297 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
3298 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
3303 Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
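// Minimal illustrative sketch (hypothetical, not part of the original file): the fulfill path
// above only accepts a preimage whose SHA256 equals the HTLC's payment hash, the same relation
// checked when we claim inbound HTLCs in `get_update_fulfill_htlc`.
#[cfg(test)]
#[allow(dead_code)]
fn preimage_matches_payment_hash_example() {
	let preimage = PaymentPreimage([7; 32]);
	let payment_hash = PaymentHash(Sha256::hash(&preimage.0[..]).to_byte_array());
	// A different preimage must not hash to the same payment hash.
	let wrong_preimage = PaymentPreimage([8; 32]);
	assert_ne!(payment_hash, PaymentHash(Sha256::hash(&wrong_preimage.0[..]).to_byte_array()));
}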
3306 pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
3307 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3308 return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
3310 if self.context.channel_state.is_peer_disconnected() {
3311 return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
3314 self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
3317 pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
3318 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3319 return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
3321 if self.context.channel_state.is_peer_disconnected() {
3322 return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
3325 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
3329 pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
3330 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3331 return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
3333 if self.context.channel_state.is_peer_disconnected() {
3334 return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
3337 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
3341 pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
3342 where L::Target: Logger
3344 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3345 return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
3347 if self.context.channel_state.is_peer_disconnected() {
3348 return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
3350 if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
3351 return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
3354 let funding_script = self.context.get_funding_redeemscript();
3356 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
3358 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
3359 let commitment_txid = {
3360 let trusted_tx = commitment_stats.tx.trust();
3361 let bitcoin_tx = trusted_tx.built_transaction();
3362 let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
3364 log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
3365 log_bytes!(msg.signature.serialize_compact()[..]),
3366 log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
3367 log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), &self.context.channel_id());
3368 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
3369 return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
3373 let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();
3375 // If our counterparty updated the channel fee in this commitment transaction, check that
3376 // they can actually afford the new fee now.
3377 let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee {
3378 update_state == FeeUpdateState::RemoteAnnounced
3381 debug_assert!(!self.context.is_outbound());
3382 let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
3383 if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
3384 return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
3387 #[cfg(any(test, fuzzing))]
3389 if self.context.is_outbound() {
3390 let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
3391 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
3392 if let Some(info) = projected_commit_tx_info {
3393 let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len()
3394 + self.context.holding_cell_htlc_updates.len();
3395 if info.total_pending_htlcs == total_pending_htlcs
3396 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
3397 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
3398 && info.feerate == self.context.feerate_per_kw {
3399 assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);
3405 if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
3406 return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
3409 // Up to LDK 0.0.115, HTLC information was required to be duplicated in the
3410 // `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
3411 // in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
3412 // outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for
3413 // backwards compatibility, we never use it in production. To provide test coverage, here,
3414 // we randomly decide (in test/fuzzing builds) to use the new vec sometimes.
3415 #[allow(unused_assignments, unused_mut)]
3416 let mut separate_nondust_htlc_sources = false;
3417 #[cfg(all(feature = "std", any(test, fuzzing)))] {
3418 use core::hash::{BuildHasher, Hasher};
3419 // Get a random value using the only std API to do so - the DefaultHasher
3420 let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
3421 separate_nondust_htlc_sources = rand_val % 2 == 0;
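// For illustration (not part of the original source): each `std::collections::hash_map::RandomState`
// is created with randomly generated keys, so building a hasher, writing nothing, and taking
// `finish()` yields an arbitrary (non-cryptographic) u64 without adding an RNG dependency; its
// low bit is then used as a coin flip for this test-only code-path selection.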
3424 let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
3425 let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
3426 for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
3427 if let Some(_) = htlc.transaction_output_index {
3428 let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
3429 self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, &self.context.channel_type,
3430 &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
3432 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &keys);
3433 let htlc_sighashtype = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
3434 let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
3435 log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
3436 log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.to_public_key().serialize()),
3437 encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), &self.context.channel_id());
3438 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key.to_public_key()) {
3439 return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
3441 if !separate_nondust_htlc_sources {
3442 htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
3445 htlcs_and_sigs.push((htlc, None, source_opt.take()));
3447 if separate_nondust_htlc_sources {
3448 if let Some(source) = source_opt.take() {
3449 nondust_htlc_sources.push(source);
3452 debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
3455 let holder_commitment_tx = HolderCommitmentTransaction::new(
3456 commitment_stats.tx,
3458 msg.htlc_signatures.clone(),
3459 &self.context.get_holder_pubkeys().funding_pubkey,
3460 self.context.counterparty_funding_pubkey()
3463 self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.outbound_htlc_preimages)
3464 .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
3466 // Update state now that we've passed all the can-fail calls...
3467 let mut need_commitment = false;
3468 if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee {
3469 if *update_state == FeeUpdateState::RemoteAnnounced {
3470 *update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
3471 need_commitment = true;
3475 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
3476 let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
3477 Some(forward_info.clone())
3479 if let Some(forward_info) = new_forward {
3480 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
3481 &htlc.payment_hash, &self.context.channel_id);
3482 htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
3483 need_commitment = true;
3486 let mut claimed_htlcs = Vec::new();
3487 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3488 if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
3489 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
3490 &htlc.payment_hash, &self.context.channel_id);
3491 // Grab the preimage, if it exists, instead of cloning
3492 let mut reason = OutboundHTLCOutcome::Success(None);
3493 mem::swap(outcome, &mut reason);
3494 if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
3495 // If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
3496 // upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
3497 // have a `Success(None)` reason. In this case we could forget some HTLC
3498 // claims, but such an upgrade is unlikely and including claimed HTLCs here
3499 // fixes a bug which the user was exposed to on 0.0.104 when they started the
3501 claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
3503 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
3504 need_commitment = true;
3508 self.context.latest_monitor_update_id += 1;
3509 let mut monitor_update = ChannelMonitorUpdate {
3510 update_id: self.context.latest_monitor_update_id,
3511 counterparty_node_id: Some(self.context.counterparty_node_id),
3512 updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
3513 commitment_tx: holder_commitment_tx,
3514 htlc_outputs: htlcs_and_sigs,
3516 nondust_htlc_sources,
3520 self.context.cur_holder_commitment_transaction_number -= 1;
3521 self.context.expecting_peer_commitment_signed = false;
3522 // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
3523 // build_commitment_no_status_check() next which will reset this to RAAFirst.
3524 self.context.resend_order = RAACommitmentOrder::CommitmentFirst;
3526 if self.context.channel_state.is_monitor_update_in_progress() {
3527 // In case we initially failed monitor updating without requiring a response, we need
3528 // to make sure the RAA gets sent first.
3529 self.context.monitor_pending_revoke_and_ack = true;
3530 if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
3531 // If we were going to send a commitment_signed after the RAA, go ahead and do all
3532 // the corresponding HTLC status updates so that
3533 // get_last_commitment_update_for_send includes the right HTLCs.
3534 self.context.monitor_pending_commitment_signed = true;
3535 let mut additional_update = self.build_commitment_no_status_check(logger);
3536 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3537 // strictly increasing by one, so decrement it here.
3538 self.context.latest_monitor_update_id = monitor_update.update_id;
3539 monitor_update.updates.append(&mut additional_update.updates);
3541 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
3542 &self.context.channel_id);
3543 return Ok(self.push_ret_blockable_mon_update(monitor_update));
3546 let need_commitment_signed = if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
3547 // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
3548 // we'll send one right away when we get the revoke_and_ack when we
3549 // free_holding_cell_htlcs().
3550 let mut additional_update = self.build_commitment_no_status_check(logger);
3551 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3552 // strictly increasing by one, so decrement it here.
3553 self.context.latest_monitor_update_id = monitor_update.update_id;
3554 monitor_update.updates.append(&mut additional_update.updates);
3558 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
3559 &self.context.channel_id(), if need_commitment_signed { " our own commitment_signed and" } else { "" });
3560 self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
3561 return Ok(self.push_ret_blockable_mon_update(monitor_update));
3564 /// Public version of the below, checking relevant preconditions first.
3565 /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
3566 /// returns `(None, Vec::new())`.
3567 pub fn maybe_free_holding_cell_htlcs<F: Deref, L: Deref>(
3568 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3569 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3570 where F::Target: FeeEstimator, L::Target: Logger
3572 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && !self.context.channel_state.should_force_holding_cell() {
3573 self.free_holding_cell_htlcs(fee_estimator, logger)
3574 } else { (None, Vec::new()) }
3577 /// Frees any pending commitment updates in the holding cell, generating the relevant messages
3578 /// for our counterparty.
3579 fn free_holding_cell_htlcs<F: Deref, L: Deref>(
3580 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3581 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3582 where F::Target: FeeEstimator, L::Target: Logger
3584 assert!(!self.context.channel_state.is_monitor_update_in_progress());
3585 if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
3586 log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
3587 if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id());
3589 let mut monitor_update = ChannelMonitorUpdate {
3590 update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
3591 counterparty_node_id: Some(self.context.counterparty_node_id),
3592 updates: Vec::new(),
3595 let mut htlc_updates = Vec::new();
3596 mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
3597 let mut update_add_count = 0;
3598 let mut update_fulfill_count = 0;
3599 let mut update_fail_count = 0;
3600 let mut htlcs_to_fail = Vec::new();
3601 for htlc_update in htlc_updates.drain(..) {
3602 // Note that this *can* fail, though it should be due to rather-rare conditions on
3603 // fee races with adding too many outputs which push our total payments just over
3604 // the limit. In case it's less rare than I anticipate, we may want to revisit
3605 // handling this case better and maybe fulfilling some of the HTLCs while attempting
3606 // to rebalance channels.
3607 match &htlc_update {
3608 &HTLCUpdateAwaitingACK::AddHTLC {
3609 amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
3610 skimmed_fee_msat, blinding_point, ..
3612 match self.send_htlc(
3613 amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(),
3614 false, skimmed_fee_msat, blinding_point, fee_estimator, logger
3616 Ok(_) => update_add_count += 1,
3619 ChannelError::Ignore(ref msg) => {
3620 log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", &payment_hash, msg, &self.context.channel_id());
3621 // If we fail to send here, then this HTLC should
3622 // be failed backwards. Failing to send here
3623 // indicates that this HTLC may keep being put back
3624 // into the holding cell without ever being
3625 // successfully forwarded/failed/fulfilled, causing
3626 // our counterparty to eventually close on us.
3627 htlcs_to_fail.push((source.clone(), *payment_hash));
3630 panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
3636 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
3637 // If an HTLC claim was previously added to the holding cell (via
3638 // `get_update_fulfill_htlc`), then generating the claim message itself must
3639 // not fail - any in between attempts to claim the HTLC will have resulted
3640 // in it hitting the holding cell again and we cannot change the state of a
3641 // holding cell HTLC from fulfill to anything else.
3642 let mut additional_monitor_update =
3643 if let UpdateFulfillFetch::NewClaim { monitor_update, .. } =
3644 self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger)
3645 { monitor_update } else { unreachable!() };
3646 update_fulfill_count += 1;
3647 monitor_update.updates.append(&mut additional_monitor_update.updates);
3649 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
3650 match self.fail_htlc(htlc_id, err_packet.clone(), false, logger) {
3651 Ok(update_fail_msg_option) => {
3652 // If an HTLC failure was previously added to the holding cell (via
3653 // `queue_fail_htlc`) then generating the fail message itself must
3654 // not fail - we should never end up in a state where we double-fail
3655 // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
3656 // for a full revocation before failing.
3657 debug_assert!(update_fail_msg_option.is_some());
3658 update_fail_count += 1;
3661 if let ChannelError::Ignore(_) = e {} else {
3663 panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
3668 &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
3669 match self.fail_htlc(htlc_id, (failure_code, sha256_of_onion), false, logger) {
3670 Ok(update_fail_malformed_opt) => {
3671 debug_assert!(update_fail_malformed_opt.is_some()); // See above comment
3672 update_fail_count += 1;
3675 if let ChannelError::Ignore(_) = e {} else {
3677 panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
3684 if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && self.context.holding_cell_update_fee.is_none() {
3685 return (None, htlcs_to_fail);
3687 let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
3688 self.send_update_fee(feerate, false, fee_estimator, logger)
3693 let mut additional_update = self.build_commitment_no_status_check(logger);
3694 // build_commitment_no_status_check and get_update_fulfill_htlc may each bump latest_monitor_update_id
3695 // but we want update ids to be strictly increasing by one, so reset it here.
3696 self.context.latest_monitor_update_id = monitor_update.update_id;
3697 monitor_update.updates.append(&mut additional_update.updates);
3699 log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
3700 &self.context.channel_id(), if update_fee.is_some() { "a fee update, " } else { "" },
3701 update_add_count, update_fulfill_count, update_fail_count);
3703 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
3704 (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
3710 /// Handles receiving a remote's revoke_and_ack. Note that we may return a new
3711 /// commitment_signed message here in case we had pending outbound HTLCs to add which were
3712 /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
3713 /// generating an appropriate error *after* the channel state has been updated based on the
3714 /// revoke_and_ack message.
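/// On success, returns the holding-cell HTLCs (if any) which should now be failed backwards,
/// plus the `ChannelMonitorUpdate` to persist, or `None` if that update is being held back
/// (e.g. because `hold_mon_update` is set or earlier monitor updates are still blocked).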
3715 pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK,
3716 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L, hold_mon_update: bool,
3717 ) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
3718 where F::Target: FeeEstimator, L::Target: Logger,
3720 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3721 return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
3723 if self.context.channel_state.is_peer_disconnected() {
3724 return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
3726 if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
3727 return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
3730 let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());
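// Per BOLT 3 the per_commitment_point is simply the secp256k1 point corresponding to the
// per_commitment_secret (secret * G), so the secret the peer just revealed must map back to
// the commitment point they previously gave us for that state, i.e. (as checked below):
//     PublicKey::from_secret_key(&self.context.secp_ctx, &secret) == counterparty_prev_commitment_point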
3732 if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
3733 if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
3734 return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
3738 if !self.context.channel_state.is_awaiting_remote_revoke() {
3739 // Our counterparty seems to have burned their coins to us (by revoking a state when we
3740 // haven't given them a new commitment transaction to broadcast). We should probably
3741 // take advantage of this by updating our channel monitor, sending them an error, and
3742 // waiting for them to broadcast their latest (now-revoked) claim. But, that would be a
3743 // lot of work, and there's some chance this is all a misunderstanding anyway.
3744 // We have to do *something*, though, since our signer may get mad at us for otherwise
3745 // jumping a remote commitment number, so best to just force-close and move on.
3746 return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
3749 #[cfg(any(test, fuzzing))]
3751 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
3752 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
3755 match &self.context.holder_signer {
3756 ChannelSignerType::Ecdsa(ecdsa) => {
3757 ecdsa.validate_counterparty_revocation(
3758 self.context.cur_counterparty_commitment_transaction_number + 1,
3760 ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
3762 // TODO (taproot|arik)
3767 self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
3768 .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
3769 self.context.latest_monitor_update_id += 1;
3770 let mut monitor_update = ChannelMonitorUpdate {
3771 update_id: self.context.latest_monitor_update_id,
3772 counterparty_node_id: Some(self.context.counterparty_node_id),
3773 updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
3774 idx: self.context.cur_counterparty_commitment_transaction_number + 1,
3775 secret: msg.per_commitment_secret,
3779 // Update state now that we've passed all the can-fail calls...
3780 // (Note that we may still fail to generate the new commitment_signed message, but that's
3781 // OK: we step the channel here and *then*, if the new generation fails, we can fail the
3782 // channel based on that; stepping things forward here should be safe either way.)
3783 self.context.channel_state.clear_awaiting_remote_revoke();
3784 self.context.sent_message_awaiting_response = None;
3785 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
3786 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
3787 self.context.cur_counterparty_commitment_transaction_number -= 1;
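// Commitment transaction numbers are tracked counting *down* from INITIAL_COMMITMENT_NUMBER
// (2^48 - 1), so decrementing here advances us to the counterparty's next commitment state.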
3789 if self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
3790 self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
3793 log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id());
3794 let mut to_forward_infos = Vec::new();
3795 let mut revoked_htlcs = Vec::new();
3796 let mut finalized_claimed_htlcs = Vec::new();
3797 let mut update_fail_htlcs = Vec::new();
3798 let mut update_fail_malformed_htlcs = Vec::new();
3799 let mut require_commitment = false;
3800 let mut value_to_self_msat_diff: i64 = 0;
3803 // Take references explicitly so that we can hold multiple references to self.context.
3804 let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
3805 let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;
3806 let expecting_peer_commitment_signed = &mut self.context.expecting_peer_commitment_signed;
3808 // We really shouldn't have two passes here, but retain gives a non-mutable ref (Rust bug)
3809 pending_inbound_htlcs.retain(|htlc| {
3810 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
3811 log_trace!(logger, " ...removing inbound LocalRemoved {}", &htlc.payment_hash);
3812 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
3813 value_to_self_msat_diff += htlc.amount_msat as i64;
3815 *expecting_peer_commitment_signed = true;
3819 pending_outbound_htlcs.retain(|htlc| {
3820 if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
3821 log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", &htlc.payment_hash);
3822 if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
3823 revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
3825 finalized_claimed_htlcs.push(htlc.source.clone());
3826 // They fulfilled, so we sent them money
3827 value_to_self_msat_diff -= htlc.amount_msat as i64;
3832 for htlc in pending_inbound_htlcs.iter_mut() {
3833 let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
3835 } else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
3839 let mut state = InboundHTLCState::Committed;
3840 mem::swap(&mut state, &mut htlc.state);
3842 if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
3843 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
3844 htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
3845 require_commitment = true;
3846 } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
3847 match forward_info {
3848 PendingHTLCStatus::Fail(fail_msg) => {
3849 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash);
3850 require_commitment = true;
3852 HTLCFailureMsg::Relay(msg) => {
3853 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
3854 update_fail_htlcs.push(msg)
3856 HTLCFailureMsg::Malformed(msg) => {
3857 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
3858 update_fail_malformed_htlcs.push(msg)
3862 PendingHTLCStatus::Forward(forward_info) => {
3863 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash);
3864 to_forward_infos.push((forward_info, htlc.htlc_id));
3865 htlc.state = InboundHTLCState::Committed;
3871 for htlc in pending_outbound_htlcs.iter_mut() {
3872 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
3873 log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", &htlc.payment_hash);
3874 htlc.state = OutboundHTLCState::Committed;
3875 *expecting_peer_commitment_signed = true;
3877 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
3878 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
3879 // Grab the preimage, if it exists, instead of cloning
3880 let mut reason = OutboundHTLCOutcome::Success(None);
3881 mem::swap(outcome, &mut reason);
3882 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
3883 require_commitment = true;
3887 self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;
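// The signed running diff above lets us apply a single adjustment to the unsigned balance:
// inbound HTLCs we fulfilled add to our balance, outbound HTLCs the peer fulfilled subtract
// from it, and failed HTLCs in either direction net out to zero.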
3889 if let Some((feerate, update_state)) = self.context.pending_update_fee {
3890 match update_state {
3891 FeeUpdateState::Outbound => {
3892 debug_assert!(self.context.is_outbound());
3893 log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
3894 self.context.feerate_per_kw = feerate;
3895 self.context.pending_update_fee = None;
3896 self.context.expecting_peer_commitment_signed = true;
3898 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); },
3899 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
3900 debug_assert!(!self.context.is_outbound());
3901 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
3902 require_commitment = true;
3903 self.context.feerate_per_kw = feerate;
3904 self.context.pending_update_fee = None;
3909 let release_monitor = self.context.blocked_monitor_updates.is_empty() && !hold_mon_update;
3910 let release_state_str =
3911 if hold_mon_update { "Holding" } else if release_monitor { "Releasing" } else { "Blocked" };
3912 macro_rules! return_with_htlcs_to_fail {
3913 ($htlcs_to_fail: expr) => {
3914 if !release_monitor {
3915 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
3916 update: monitor_update,
3918 return Ok(($htlcs_to_fail, None));
3920 return Ok(($htlcs_to_fail, Some(monitor_update)));
3925 if self.context.channel_state.is_monitor_update_in_progress() {
3926 // We can't actually generate a new commitment transaction (including by freeing holding
3927 // cells) while we can't update the monitor, so we just return what we have.
3928 if require_commitment {
3929 self.context.monitor_pending_commitment_signed = true;
3930 // When the monitor updating is restored we'll call
3931 // get_last_commitment_update_for_send(), which does not update state, but we're
3932 // definitely now awaiting a remote revoke before we can step forward any more, so we set the flag here.
3934 let mut additional_update = self.build_commitment_no_status_check(logger);
3935 // build_commitment_no_status_check may bump latest_monitor_update_id but we want update ids
3936 // to be strictly increasing by one, so reset it here.
3937 self.context.latest_monitor_update_id = monitor_update.update_id;
3938 monitor_update.updates.append(&mut additional_update.updates);
3940 self.context.monitor_pending_forwards.append(&mut to_forward_infos);
3941 self.context.monitor_pending_failures.append(&mut revoked_htlcs);
3942 self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
3943 log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", &self.context.channel_id());
3944 return_with_htlcs_to_fail!(Vec::new());
3947 match self.free_holding_cell_htlcs(fee_estimator, logger) {
3948 (Some(mut additional_update), htlcs_to_fail) => {
3949 // free_holding_cell_htlcs may bump latest_monitor_update_id multiple times but we want
3950 // update ids to be strictly increasing by one, so reset it here.
3951 self.context.latest_monitor_update_id = monitor_update.update_id;
3952 monitor_update.updates.append(&mut additional_update.updates);
3954 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with holding cell HTLCs freed. {} monitor update.",
3955 &self.context.channel_id(), release_state_str);
3957 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3958 return_with_htlcs_to_fail!(htlcs_to_fail);
3960 (None, htlcs_to_fail) => {
3961 if require_commitment {
3962 let mut additional_update = self.build_commitment_no_status_check(logger);
3964 // build_commitment_no_status_check may bump latest_monitor_update_id but we want update ids
3965 // to be strictly increasing by one, so reset it here.
3966 self.context.latest_monitor_update_id = monitor_update.update_id;
3967 monitor_update.updates.append(&mut additional_update.updates);
3969 log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed. {} monitor update.",
3970 &self.context.channel_id(),
3971 update_fail_htlcs.len() + update_fail_malformed_htlcs.len(),
3974 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3975 return_with_htlcs_to_fail!(htlcs_to_fail);
3977 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary. {} monitor update.",
3978 &self.context.channel_id(), release_state_str);
3980 self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3981 return_with_htlcs_to_fail!(htlcs_to_fail);
3987 /// Queues up an outbound update fee by placing it in the holding cell. You should call
3988 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
3989 /// commitment update.
3990 pub fn queue_update_fee<F: Deref, L: Deref>(&mut self, feerate_per_kw: u32,
3991 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
3992 where F::Target: FeeEstimator, L::Target: Logger
3994 let msg_opt = self.send_update_fee(feerate_per_kw, true, fee_estimator, logger);
3995 assert!(msg_opt.is_none(), "We forced holding cell?");
3998 /// Adds a pending update to this channel. See the doc for send_htlc for
3999 /// further details on the optionality of the return value.
4000 /// If our balance is too low to cover the cost of the next commitment transaction at the
4001 /// new feerate, the update is cancelled.
4003 /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
4004 /// [`Channel`] if `force_holding_cell` is false.
4005 fn send_update_fee<F: Deref, L: Deref>(
4006 &mut self, feerate_per_kw: u32, mut force_holding_cell: bool,
4007 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
4008 ) -> Option<msgs::UpdateFee>
4009 where F::Target: FeeEstimator, L::Target: Logger
4011 if !self.context.is_outbound() {
4012 panic!("Cannot send fee from inbound channel");
4014 if !self.context.is_usable() {
4015 panic!("Cannot update fee until channel is fully established and we haven't started shutting down");
4017 if !self.context.is_live() {
4018 panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
4021 // Before proposing a feerate update, check that we can actually afford the new fee.
4022 let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
4023 let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
4024 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
4025 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
4026 let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000;
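// For intuition: `commit_tx_fee_sat` scales the feerate (sats per 1000 weight units) by the
// expected commitment-transaction weight for the given non-dust HTLC count, and we reserve
// CONCURRENT_INBOUND_HTLC_FEE_BUFFER extra HTLC slots for HTLCs the counterparty may add
// concurrently. Illustrative non-anchor example using BOLT 3 weights (numbers hypothetical):
//     2_500 sat/kWU * (724 base WU + 2 HTLCs * 172 WU) / 1_000 = 2_670 sat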
4027 let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
4028 if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
4029 //TODO: auto-close after a number of failures?
4030 log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
4034 // Note: we evaluate the pending-HTLC "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
4035 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
4036 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
4037 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
4038 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
4039 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
4042 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
4043 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
4047 if self.context.channel_state.is_awaiting_remote_revoke() || self.context.channel_state.is_monitor_update_in_progress() {
4048 force_holding_cell = true;
4051 if force_holding_cell {
4052 self.context.holding_cell_update_fee = Some(feerate_per_kw);
4056 debug_assert!(self.context.pending_update_fee.is_none());
4057 self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));
4059 Some(msgs::UpdateFee {
4060 channel_id: self.context.channel_id,
4065 /// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
4066 /// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be re-sent.
4068 /// No further message handling calls may be made until a channel_reestablish dance has completed.
4070 /// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately.
4071 pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger {
4072 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
4073 if self.context.channel_state.is_pre_funded_state() {
4077 if self.context.channel_state.is_peer_disconnected() {
4078 // While the below code should be idempotent, it's simpler to just return early, as
4079 // redundant disconnect events can fire, though they should be rare.
4083 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
4084 self.context.announcement_sigs_state = AnnouncementSigsState::NotSent;
4087 // Upon reconnect we have to start the closing_signed dance over, but shutdown messages
4088 // will be retransmitted.
4089 self.context.last_sent_closing_fee = None;
4090 self.context.pending_counterparty_closing_signed = None;
4091 self.context.closing_fee_limits = None;
4093 let mut inbound_drop_count = 0;
4094 self.context.pending_inbound_htlcs.retain(|htlc| {
4096 InboundHTLCState::RemoteAnnounced(_) => {
4097 // They sent us an update_add_htlc but we never got the commitment_signed.
4098 // We'll tell them what commitment_signed we're expecting next and they'll drop
4099 // this HTLC accordingly
4100 inbound_drop_count += 1;
4103 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
4104 // We received a commitment_signed updating this HTLC and (at least hopefully)
4105 // sent a revoke_and_ack (which we can re-transmit) and have heard nothing
4106 // in response to it yet, so don't touch it.
4109 InboundHTLCState::Committed => true,
4110 InboundHTLCState::LocalRemoved(_) => {
4111 // We (hopefully) sent a commitment_signed updating this HTLC (which we can
4112 // re-transmit if needed) and they may have even sent a revoke_and_ack back
4113 // (that we missed). Keep this around for now and if they tell us they missed
4114 // the commitment_signed we can re-transmit the update then.
4119 self.context.next_counterparty_htlc_id -= inbound_drop_count;
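// The RemoteAnnounced HTLCs dropped above consumed counterparty HTLC ids that were never
// committed; the peer will re-send those update_add_htlcs with the same ids after
// reconnection, so roll our expected next id back accordingly.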
4121 if let Some((_, update_state)) = self.context.pending_update_fee {
4122 if update_state == FeeUpdateState::RemoteAnnounced {
4123 debug_assert!(!self.context.is_outbound());
4124 self.context.pending_update_fee = None;
4128 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
4129 if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
4130 // They sent us an update to remove this but haven't yet sent the corresponding
4131 // commitment_signed, we need to move it back to Committed and they can re-send
4132 // the update upon reconnection.
4133 htlc.state = OutboundHTLCState::Committed;
4137 self.context.sent_message_awaiting_response = None;
4139 self.context.channel_state.set_peer_disconnected();
4140 log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id());
4144 /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
4145 /// This must be called before we return the [`ChannelMonitorUpdate`] back to the
4146 /// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
4147 /// update completes (potentially immediately).
4148 /// The messages which were generated with the monitor update must *not* have been sent to the
4149 /// remote end, and must instead have been dropped. They will be regenerated when
4150 /// [`Self::monitor_updating_restored`] is called.
4152 /// [`ChannelManager`]: super::channelmanager::ChannelManager
4153 /// [`chain::Watch`]: crate::chain::Watch
4154 /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
4155 fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
4156 resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
4157 mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
4158 mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
4160 self.context.monitor_pending_revoke_and_ack |= resend_raa;
4161 self.context.monitor_pending_commitment_signed |= resend_commitment;
4162 self.context.monitor_pending_channel_ready |= resend_channel_ready;
4163 self.context.monitor_pending_forwards.append(&mut pending_forwards);
4164 self.context.monitor_pending_failures.append(&mut pending_fails);
4165 self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
4166 self.context.channel_state.set_monitor_update_in_progress();
4169 /// Indicates that the latest ChannelMonitor update has been committed by the client
4170 /// successfully and we should restore normal operation. Returns messages which should be sent
4171 /// to the remote side.
4172 pub fn monitor_updating_restored<L: Deref, NS: Deref>(
4173 &mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash,
4174 user_config: &UserConfig, best_block_height: u32
4175 ) -> MonitorRestoreUpdates
4178 NS::Target: NodeSigner
4180 assert!(self.context.channel_state.is_monitor_update_in_progress());
4181 self.context.channel_state.clear_monitor_update_in_progress();
4183 // If we're past (or at) the AwaitingChannelReady stage on an outbound channel, try to
4184 // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
4185 // first received the funding_signed.
4186 let mut funding_broadcastable =
4187 if self.context.is_outbound() &&
4188 matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH)) ||
4189 matches!(self.context.channel_state, ChannelState::ChannelReady(_))
4191 self.context.funding_transaction.take()
4193 // That said, if the funding transaction is already confirmed (ie we're active with a
4194 // minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
4195 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && self.context.minimum_depth != Some(0) {
4196 funding_broadcastable = None;
4199 // We will never broadcast the funding transaction when we're in MonitorUpdateInProgress
4200 // (and we assume the user never directly broadcasts the funding transaction and waits for
4201 // us to do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
4202 // * an inbound channel that failed to persist the monitor on funding_created and we got
4203 // the funding transaction confirmed before the monitor was persisted, or
4204 // * a 0-conf channel and intended to send the channel_ready before any broadcast at all.
4205 let channel_ready = if self.context.monitor_pending_channel_ready {
4206 assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
4207 "Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
4208 self.context.monitor_pending_channel_ready = false;
4209 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4210 Some(msgs::ChannelReady {
4211 channel_id: self.context.channel_id(),
4212 next_per_commitment_point,
4213 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4217 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger);
4219 let mut accepted_htlcs = Vec::new();
4220 mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards);
4221 let mut failed_htlcs = Vec::new();
4222 mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
4223 let mut finalized_claimed_htlcs = Vec::new();
4224 mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);
4226 if self.context.channel_state.is_peer_disconnected() {
4227 self.context.monitor_pending_revoke_and_ack = false;
4228 self.context.monitor_pending_commitment_signed = false;
4229 return MonitorRestoreUpdates {
4230 raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
4231 accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
4235 let raa = if self.context.monitor_pending_revoke_and_ack {
4236 Some(self.get_last_revoke_and_ack())
4238 let commitment_update = if self.context.monitor_pending_commitment_signed {
4239 self.get_last_commitment_update_for_send(logger).ok()
4241 if commitment_update.is_some() {
4242 self.mark_awaiting_response();
4245 self.context.monitor_pending_revoke_and_ack = false;
4246 self.context.monitor_pending_commitment_signed = false;
4247 let order = self.context.resend_order.clone();
4248 log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
4249 &self.context.channel_id(), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
4250 if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
4251 match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
4252 MonitorRestoreUpdates {
4253 raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
4257 pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
4258 where F::Target: FeeEstimator, L::Target: Logger
4260 if self.context.is_outbound() {
4261 return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
4263 if self.context.channel_state.is_peer_disconnected() {
4264 return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
4266 Channel::<SP>::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
4268 self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
4269 self.context.update_time_counter += 1;
4270 // Check that we won't be pushed over our dust exposure limit by the feerate increase.
4271 if !self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
4272 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
4273 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
4274 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
4275 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
4276 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
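// "Dust exposure" is the total msat value of HTLCs which would be trimmed to fees rather than
// given outputs on a commitment transaction. On non-anchor channels (as here) the trim
// threshold grows with the feerate, so a feerate increase can push more HTLC value into
// exposure beyond our configured limit, which we check for below.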
4277 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
4278 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
4279 msg.feerate_per_kw, holder_tx_dust_exposure)));
4281 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
4282 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
4283 msg.feerate_per_kw, counterparty_tx_dust_exposure)));
4289 /// Indicates that the signer may have some signatures for us, so we should retry if we're blocked.
4291 #[cfg(async_signing)]
4292 pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger {
4293 let commitment_update = if self.context.signer_pending_commitment_update {
4294 self.get_last_commitment_update_for_send(logger).ok()
4296 let funding_signed = if self.context.signer_pending_funding && !self.context.is_outbound() {
4297 self.context.get_funding_signed_msg(logger).1
4299 let channel_ready = if funding_signed.is_some() {
4300 self.check_get_channel_ready(0)
4303 log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed and {} channel_ready",
4304 if commitment_update.is_some() { "a" } else { "no" },
4305 if funding_signed.is_some() { "a" } else { "no" },
4306 if channel_ready.is_some() { "a" } else { "no" });
4308 SignerResumeUpdates {
4315 fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
4316 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4317 let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
4318 msgs::RevokeAndACK {
4319 channel_id: self.context.channel_id,
4320 per_commitment_secret,
4321 next_per_commitment_point,
4323 next_local_nonce: None,
4327 /// Gets the last commitment update for immediate sending to our peer.
4328 fn get_last_commitment_update_for_send<L: Deref>(&mut self, logger: &L) -> Result<msgs::CommitmentUpdate, ()> where L::Target: Logger {
4329 let mut update_add_htlcs = Vec::new();
4330 let mut update_fulfill_htlcs = Vec::new();
4331 let mut update_fail_htlcs = Vec::new();
4332 let mut update_fail_malformed_htlcs = Vec::new();
4334 for htlc in self.context.pending_outbound_htlcs.iter() {
4335 if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
4336 update_add_htlcs.push(msgs::UpdateAddHTLC {
4337 channel_id: self.context.channel_id(),
4338 htlc_id: htlc.htlc_id,
4339 amount_msat: htlc.amount_msat,
4340 payment_hash: htlc.payment_hash,
4341 cltv_expiry: htlc.cltv_expiry,
4342 onion_routing_packet: (**onion_packet).clone(),
4343 skimmed_fee_msat: htlc.skimmed_fee_msat,
4344 blinding_point: htlc.blinding_point,
4349 for htlc in self.context.pending_inbound_htlcs.iter() {
4350 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
4352 &InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
4353 update_fail_htlcs.push(msgs::UpdateFailHTLC {
4354 channel_id: self.context.channel_id(),
4355 htlc_id: htlc.htlc_id,
4356 reason: err_packet.clone()
4359 &InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
4360 update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
4361 channel_id: self.context.channel_id(),
4362 htlc_id: htlc.htlc_id,
4363 sha256_of_onion: sha256_of_onion.clone(),
4364 failure_code: failure_code.clone(),
4367 &InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
4368 update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
4369 channel_id: self.context.channel_id(),
4370 htlc_id: htlc.htlc_id,
4371 payment_preimage: payment_preimage.clone(),
4378 let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() {
4379 Some(msgs::UpdateFee {
4380 channel_id: self.context.channel_id(),
4381 feerate_per_kw: self.context.pending_update_fee.unwrap().0,
4385 log_trace!(logger, "Regenerating latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
4386 &self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" },
4387 update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
4388 let commitment_signed = if let Ok(update) = self.send_commitment_no_state_update(logger).map(|(cu, _)| cu) {
4389 if self.context.signer_pending_commitment_update {
4390 log_trace!(logger, "Commitment update generated: clearing signer_pending_commitment_update");
4391 self.context.signer_pending_commitment_update = false;
4395 #[cfg(not(async_signing))] {
4396 panic!("Failed to get signature for new commitment state");
4398 #[cfg(async_signing)] {
4399 if !self.context.signer_pending_commitment_update {
4400 log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
4401 self.context.signer_pending_commitment_update = true;
4406 Ok(msgs::CommitmentUpdate {
4407 update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
4412 /// Gets the `Shutdown` message we should send our peer on reconnect, if any.
4413 pub fn get_outbound_shutdown(&self) -> Option<msgs::Shutdown> {
4414 if self.context.channel_state.is_local_shutdown_sent() {
4415 assert!(self.context.shutdown_scriptpubkey.is_some());
4416 Some(msgs::Shutdown {
4417 channel_id: self.context.channel_id,
4418 scriptpubkey: self.get_closing_scriptpubkey(),
4423 /// May panic if some calls other than message-handling calls (which will all Err immediately)
4424 /// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
4426 /// Some links printed in log lines are included here to check them during build (when run with
4427 /// `cargo doc --document-private-items`):
4428 /// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
4429 /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
4430 pub fn channel_reestablish<L: Deref, NS: Deref>(
4431 &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
4432 chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock
4433 ) -> Result<ReestablishResponses, ChannelError>
4436 NS::Target: NodeSigner
4438 if !self.context.channel_state.is_peer_disconnected() {
4439 // While BOLT 2 doesn't explicitly say we should error the channel here, it
4440 // almost certainly indicates we are going to end up out-of-sync in some way, so we
4441 // just close here instead of trying to recover.
4442 return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
4445 if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
4446 msg.next_local_commitment_number == 0 {
4447 return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned()));
4450 let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
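// channel_reestablish carries commitment numbers counted upwards from the start of the
// channel, while our internal counters count *down* from INITIAL_COMMITMENT_NUMBER (2^48 - 1);
// this converts our holder counter into the peer's numbering for the comparisons below.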
4451 if msg.next_remote_commitment_number > 0 {
4452 let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
4453 let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
4454 .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
4455 if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
4456 return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
4458 if msg.next_remote_commitment_number > our_commitment_transaction {
4459 macro_rules! log_and_panic {
4460 ($err_msg: expr) => {
4461 log_error!(logger, $err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4462 panic!($err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4465 log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
4466 This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
4467 More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
4468 If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
4469 ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
4470 ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
4471 Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
4472 See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
4476 // Before we change the state of the channel, we check if the peer is sending a very old
4477 // commitment transaction number; if so, we respond with a warning message.
4478 if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
4479 return Err(ChannelError::Warn(format!(
4480 "Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)",
4481 msg.next_remote_commitment_number,
4482 our_commitment_transaction
4486 // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
4487 // remaining cases either succeed or ErrorMessage-fail).
4488 self.context.channel_state.clear_peer_disconnected();
4489 self.context.sent_message_awaiting_response = None;
4491 let shutdown_msg = self.get_outbound_shutdown();
4493 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger);
4495 if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(_)) {
4496 // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
4497 if !self.context.channel_state.is_our_channel_ready() ||
4498 self.context.channel_state.is_monitor_update_in_progress() {
4499 if msg.next_remote_commitment_number != 0 {
4500 return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
4502 // Short circuit the whole handler as there is nothing we can resend them
4503 return Ok(ReestablishResponses {
4504 channel_ready: None,
4505 raa: None, commitment_update: None,
4506 order: RAACommitmentOrder::CommitmentFirst,
4507 shutdown_msg, announcement_sigs,
4511 // We have OurChannelReady set!
4512 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4513 return Ok(ReestablishResponses {
4514 channel_ready: Some(msgs::ChannelReady {
4515 channel_id: self.context.channel_id(),
4516 next_per_commitment_point,
4517 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4519 raa: None, commitment_update: None,
4520 order: RAACommitmentOrder::CommitmentFirst,
4521 shutdown_msg, announcement_sigs,
4525 let required_revoke = if msg.next_remote_commitment_number == our_commitment_transaction {
4526 // Remote isn't waiting on any RevokeAndACK from us!
4527 // Note that if we need to repeat our ChannelReady we'll do that in the next if block.
4529 } else if msg.next_remote_commitment_number + 1 == our_commitment_transaction {
4530 if self.context.channel_state.is_monitor_update_in_progress() {
4531 self.context.monitor_pending_revoke_and_ack = true;
4534 Some(self.get_last_revoke_and_ack())
4537 debug_assert!(false, "All values should have been handled in the four cases above");
4538 return Err(ChannelError::Close(format!(
4539 "Peer attempted to reestablish channel expecting a future local commitment transaction: {} (received) vs {} (expected)",
4540 msg.next_remote_commitment_number,
4541 our_commitment_transaction
4545 // We increment cur_counterparty_commitment_transaction_number only upon receipt of
4546 // revoke_and_ack, not on sending commitment_signed, so we add one if we have
4547 // AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
4548 // the corresponding revoke_and_ack back yet.
4549 let is_awaiting_remote_revoke = self.context.channel_state.is_awaiting_remote_revoke();
4550 if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
4551 self.mark_awaiting_response();
4553 let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };
4555 let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 {
4556 // We should never have to worry about MonitorUpdateInProgress resending ChannelReady
4557 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4558 Some(msgs::ChannelReady {
4559 channel_id: self.context.channel_id(),
4560 next_per_commitment_point,
4561 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4565 if msg.next_local_commitment_number == next_counterparty_commitment_number {
4566 if required_revoke.is_some() {
4567 log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id());
4569 log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id());
4572 Ok(ReestablishResponses {
4573 channel_ready, shutdown_msg, announcement_sigs,
4574 raa: required_revoke,
4575 commitment_update: None,
4576 order: self.context.resend_order.clone(),
4578 } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
4579 if required_revoke.is_some() {
4580 log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id());
4582 log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id());
4585 if self.context.channel_state.is_monitor_update_in_progress() {
4586 self.context.monitor_pending_commitment_signed = true;
4587 Ok(ReestablishResponses {
4588 channel_ready, shutdown_msg, announcement_sigs,
4589 commitment_update: None, raa: None,
4590 order: self.context.resend_order.clone(),
4593 Ok(ReestablishResponses {
4594 channel_ready, shutdown_msg, announcement_sigs,
4595 raa: required_revoke,
4596 commitment_update: self.get_last_commitment_update_for_send(logger).ok(),
4597 order: self.context.resend_order.clone(),
4600 } else if msg.next_local_commitment_number < next_counterparty_commitment_number {
4601 Err(ChannelError::Close(format!(
4602 "Peer attempted to reestablish channel with a very old remote commitment transaction: {} (received) vs {} (expected)",
4603 msg.next_local_commitment_number,
4604 next_counterparty_commitment_number,
4607 Err(ChannelError::Close(format!(
4608 "Peer attempted to reestablish channel with a future remote commitment transaction: {} (received) vs {} (expected)",
4609 msg.next_local_commitment_number,
4610 next_counterparty_commitment_number,
4615 /// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole
4616 /// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart,
4617 /// at which point they will be recalculated.
4618 fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>)
4620 where F::Target: FeeEstimator
4622 if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); }
4624 // Propose a range from our ChannelCloseMinimum feerate to our NonAnchorChannelFee feerate
4625 // plus our force_close_avoidance_max_fee_satoshis.
4626 // If we fail to come to consensus, we'll have to force-close.
4627 let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::ChannelCloseMinimum);
4628 // Use NonAnchorChannelFee because this should be an estimate for a channel close
4629 // that we don't expect to need fee bumping
4630 let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
4631 let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() };
4633 // The spec requires that (when the channel does not have anchors) we only send absolute
4634 // channel fees no greater than the absolute channel fee on the current commitment
4635 // transaction. It's unclear *which* commitment transaction this refers to, and there isn't
4636 // a very good reason to apply such a limit in any case. We don't bother doing so, risking
4637 // some force-closure by old nodes, but we wanted to close the channel anyway.
4639 if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw {
4640 let min_feerate = if self.context.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) };
4641 proposed_feerate = cmp::max(proposed_feerate, min_feerate);
4642 proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate);
4645 // Note that technically we could end up with a lower minimum fee if one side's balance is
4646 // below our dust limit, causing the output to disappear. We don't bother handling this
4647 // case, however, as this should only happen if a channel is closed before any (material)
4648 // payments have been made on it. This may cause slight fee overpayment and/or failure to
4649 // come to consensus with our counterparty on appropriate fees, however it should be a
4650 // relatively rare case. We can revisit this later, though note that in order to determine
4651 // if the funder's output is dust we have to know the absolute fee we're going to use.
4652 let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
4653 let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
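// Fee rates are expressed in sats per 1000 weight units, so, purely as an illustrative
// example, a 1_000 sat/kWU proposal on a 700-WU closing transaction works out to
//     1_000 * 700 / 1_000 = 700 sats.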
4654 let proposed_max_total_fee_satoshis = if self.context.is_outbound() {
4655 // We always add force_close_avoidance_max_fee_satoshis to our normal
4656 // feerate-calculated fee, but allow the max to be overridden if we're using a
4657 // target feerate-calculated fee.
4658 cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.context.config.options.force_close_avoidance_max_fee_satoshis,
4659 proposed_max_feerate as u64 * tx_weight / 1000)
4661 self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000
4664 self.context.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis));
4665 self.context.closing_fee_limits.clone().unwrap()
4668 /// Returns true if we're ready to commence the closing_signed negotiation phase. This is true
4669 /// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At
4670 /// this point if we're the funder we should send the initial closing_signed, and in any case
4671 /// shutdown should complete within a reasonable timeframe.
4672 fn closing_negotiation_ready(&self) -> bool {
4673 self.context.closing_negotiation_ready()
4676 /// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
4677 /// an Err if no progress is being made and the channel should be force-closed instead.
4678 /// Should be called on a one-minute timer.
4679 pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
4680 if self.closing_negotiation_ready() {
4681 if self.context.closing_signed_in_flight {
4682 return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
4684 self.context.closing_signed_in_flight = true;
4690 pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
4691 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
4692 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
4693 where F::Target: FeeEstimator, L::Target: Logger
4695 // If we're waiting on a monitor persistence, that implies we're also waiting to send some
4696 // message to our counterparty (probably a `revoke_and_ack`). In such a case, we shouldn't
4697 // initiate `closing_signed` negotiation until we're clear of all pending messages. Note
4698 // that closing_negotiation_ready checks this case (as well as a few others).
4699 if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
4700 return Ok((None, None, None));
4703 if !self.context.is_outbound() {
4704 if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() {
4705 return self.closing_signed(fee_estimator, &msg);
4707 return Ok((None, None, None));
4710 // If we're waiting on a counterparty `commitment_signed` to clear some updates from our
4711 // local commitment transaction, we can't yet initiate `closing_signed` negotiation.
4712 if self.context.expecting_peer_commitment_signed {
4713 return Ok((None, None, None));
4716 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
4718 assert!(self.context.shutdown_scriptpubkey.is_some());
4719 let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false);
4720 log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)",
4721 our_min_fee, our_max_fee, total_fee_satoshis);
4723 match &self.context.holder_signer {
4724 ChannelSignerType::Ecdsa(ecdsa) => {
4726 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
4727 .map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;
4729 self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
4730 Ok((Some(msgs::ClosingSigned {
4731 channel_id: self.context.channel_id,
4732 fee_satoshis: total_fee_satoshis,
4734 fee_range: Some(msgs::ClosingSignedFeeRange {
4735 min_fee_satoshis: our_min_fee,
4736 max_fee_satoshis: our_max_fee,
4740 // TODO (taproot|arik)
4746 // Marks a channel as waiting for a response from the counterparty. If it's not received
4747 // [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] after sending our own to them, then we'll attempt a reconnection.
4749 fn mark_awaiting_response(&mut self) {
4750 self.context.sent_message_awaiting_response = Some(0);
4753 /// Determines whether we should disconnect the counterparty due to not receiving a response
4754 /// within our expected timeframe.
4756 /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
4757 pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool {
4758 let ticks_elapsed = if let Some(ticks_elapsed) = self.context.sent_message_awaiting_response.as_mut() {
4761 // Don't disconnect when we're not waiting on a response.
4764 *ticks_elapsed += 1;
4765 *ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
4769 &mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
4770 ) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
4772 if self.context.channel_state.is_peer_disconnected() {
4773 return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
4775 if self.context.channel_state.is_pre_funded_state() {
4776 // Spec says we should fail the connection, not the channel, but that's nonsense, there
4777 // are plenty of reasons you may want to fail a channel pre-funding, and spec says you
4778 // can do that via error message without getting a connection fail anyway...
4779 return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
4781 for htlc in self.context.pending_inbound_htlcs.iter() {
4782 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
4783 return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
4786 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
4788 if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
4789 return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_hex_string())));
4792 if self.context.counterparty_shutdown_scriptpubkey.is_some() {
4793 if Some(&msg.scriptpubkey) != self.context.counterparty_shutdown_scriptpubkey.as_ref() {
4794 return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_hex_string())));
4797 self.context.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
4800 // If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
4801 // immediately after the commitment dance, but we can send a Shutdown because we won't send
4802 // any further commitment updates after we set LocalShutdownSent.
4803 let send_shutdown = !self.context.channel_state.is_local_shutdown_sent();
4805 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
4808 assert!(send_shutdown);
4809 let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
4810 Ok(scriptpubkey) => scriptpubkey,
4811 Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
4813 if !shutdown_scriptpubkey.is_compatible(their_features) {
4814 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
4816 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
4821 // From here on out, we may not fail!
4823 self.context.channel_state.set_remote_shutdown_sent();
4824 self.context.update_time_counter += 1;
4826 let monitor_update = if update_shutdown_script {
4827 self.context.latest_monitor_update_id += 1;
4828 let monitor_update = ChannelMonitorUpdate {
4829 update_id: self.context.latest_monitor_update_id,
4830 counterparty_node_id: Some(self.context.counterparty_node_id),
4831 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
4832 scriptpubkey: self.get_closing_scriptpubkey(),
4835 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
4836 self.push_ret_blockable_mon_update(monitor_update)
4838 let shutdown = if send_shutdown {
4839 Some(msgs::Shutdown {
4840 channel_id: self.context.channel_id,
4841 scriptpubkey: self.get_closing_scriptpubkey(),
4845 // We can't send our shutdown until we've committed all of our pending HTLCs, but the
4846 // remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
4847 // cell HTLCs and return them to fail the payment.
4848 self.context.holding_cell_update_fee = None;
4849 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
4850 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
4852 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
4853 dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
4860 self.context.channel_state.set_local_shutdown_sent();
4861 self.context.update_time_counter += 1;
4863 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
4866 fn build_signed_closing_transaction(&self, closing_tx: &ClosingTransaction, counterparty_sig: &Signature, sig: &Signature) -> Transaction {
4867 let mut tx = closing_tx.trust().built_transaction().clone();
4869 tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
4871 let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize();
4872 let counterparty_funding_key = self.context.counterparty_funding_pubkey().serialize();
4873 let mut holder_sig = sig.serialize_der().to_vec();
4874 holder_sig.push(EcdsaSighashType::All as u8);
4875 let mut cp_sig = counterparty_sig.serialize_der().to_vec();
4876 cp_sig.push(EcdsaSighashType::All as u8);
4877 if funding_key[..] < counterparty_funding_key[..] {
4878 tx.input[0].witness.push(holder_sig);
4879 tx.input[0].witness.push(cp_sig);
4881 tx.input[0].witness.push(cp_sig);
4882 tx.input[0].witness.push(holder_sig);
4885 tx.input[0].witness.push(self.context.get_funding_redeemscript().into_bytes());
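// A compact illustration (standalone sketch, not this crate's API) of the witness layout built
// above: the p2wsh spend of the 2-of-2 funding output carries an empty element (the historical
// CHECKMULTISIG off-by-one), then the two signatures in the same lexicographic order as the
// funding pubkeys appear in the redeemscript, then the redeemscript itself:
//
//     fn closing_witness(our_key: &[u8; 33], their_key: &[u8; 33],
//                        our_sig: Vec<u8>, their_sig: Vec<u8>, redeemscript: Vec<u8>) -> Vec<Vec<u8>> {
//         let (first, second) =
//             if our_key[..] < their_key[..] { (our_sig, their_sig) } else { (their_sig, our_sig) };
//         vec![Vec::new(), first, second, redeemscript]
//     }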
4889 pub fn closing_signed<F: Deref>(
4890 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned)
4891 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
4892 where F::Target: FeeEstimator
4894 if !self.context.channel_state.is_both_sides_shutdown() {
4895 return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
4897 if self.context.channel_state.is_peer_disconnected() {
4898 return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
4900 if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
4901 return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
4903 if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
4904 return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
4907 if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() {
4908 return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
4911 if self.context.channel_state.is_monitor_update_in_progress() {
4912 self.context.pending_counterparty_closing_signed = Some(msg.clone());
4913 return Ok((None, None, None));
4916 let funding_redeemscript = self.context.get_funding_redeemscript();
4917 let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
4918 if used_total_fee != msg.fee_satoshis {
4919 return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
4921 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
4923 match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
4926 // The remote end may have decided to revoke their output due to inconsistent dust
4927 // limits, so check for that case by re-checking the signature here.
4928 closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0;
4929 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
4930 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.context.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
4934 for outp in closing_tx.trust().built_transaction().output.iter() {
4935 if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
4936 return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
4940 assert!(self.context.shutdown_scriptpubkey.is_some());
4941 if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
4942 if last_fee == msg.fee_satoshis {
4943 let shutdown_result = ShutdownResult {
4944 monitor_update: None,
4945 dropped_outbound_htlcs: Vec::new(),
4946 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
4947 channel_id: self.context.channel_id,
4948 counterparty_node_id: self.context.counterparty_node_id,
4949 unbroadcasted_funding_tx: self.context.unbroadcasted_funding(),
4951 let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
4952 self.context.channel_state = ChannelState::ShutdownComplete;
4953 self.context.update_time_counter += 1;
4954 return Ok((None, Some(tx), Some(shutdown_result)));
4958 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
4960 macro_rules! propose_fee {
4961 ($new_fee: expr) => {
4962 let (closing_tx, used_fee) = if $new_fee == msg.fee_satoshis {
4963 (closing_tx, $new_fee)
4965 self.build_closing_transaction($new_fee, false)
4968 return match &self.context.holder_signer {
4969 ChannelSignerType::Ecdsa(ecdsa) => {
4971 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
4972 .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
4973 let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
4974 let shutdown_result = ShutdownResult {
4975 monitor_update: None,
4976 dropped_outbound_htlcs: Vec::new(),
4977 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
4978 channel_id: self.context.channel_id,
4979 counterparty_node_id: self.context.counterparty_node_id,
4980 unbroadcasted_funding_tx: self.context.unbroadcasted_funding(),
4982 self.context.channel_state = ChannelState::ShutdownComplete;
4983 self.context.update_time_counter += 1;
4984 let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
4985 (Some(tx), Some(shutdown_result))
4990 self.context.last_sent_closing_fee = Some((used_fee, sig.clone()));
4991 Ok((Some(msgs::ClosingSigned {
4992 channel_id: self.context.channel_id,
4993 fee_satoshis: used_fee,
4995 fee_range: Some(msgs::ClosingSignedFeeRange {
4996 min_fee_satoshis: our_min_fee,
4997 max_fee_satoshis: our_max_fee,
4999 }), signed_tx, shutdown_result))
5001 // TODO (taproot|arik)
5008 if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range {
5009 if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis {
5010 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
5012 if max_fee_satoshis < our_min_fee {
5013 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee)));
5015 if min_fee_satoshis > our_max_fee {
5016 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee)));
5019 if !self.context.is_outbound() {
5020 // They have to pay, so pick the highest fee in the overlapping range.
5021 // We should never set an upper bound aside from their full balance
5022 debug_assert_eq!(our_max_fee, self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000);
5023 propose_fee!(cmp::min(max_fee_satoshis, our_max_fee));
5025 if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee {
5026 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
5027 msg.fee_satoshis, our_min_fee, our_max_fee)));
5029 // The proposed fee is in our acceptable range, accept it and broadcast!
5030 propose_fee!(msg.fee_satoshis);
5033 // Old fee style negotiation. We don't bother to enforce whether they are complying
5034 // with the "making progress" requirements, we just comply and hope for the best.
5035 if let Some((last_fee, _)) = self.context.last_sent_closing_fee {
5036 if msg.fee_satoshis > last_fee {
5037 if msg.fee_satoshis < our_max_fee {
5038 propose_fee!(msg.fee_satoshis);
5039 } else if last_fee < our_max_fee {
5040 propose_fee!(our_max_fee);
5042 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
5045 if msg.fee_satoshis > our_min_fee {
5046 propose_fee!(msg.fee_satoshis);
5047 } else if last_fee > our_min_fee {
5048 propose_fee!(our_min_fee);
5050 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
5054 if msg.fee_satoshis < our_min_fee {
5055 propose_fee!(our_min_fee);
5056 } else if msg.fee_satoshis > our_max_fee {
5057 propose_fee!(our_max_fee);
5059 propose_fee!(msg.fee_satoshis);
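// Sketching the modern (fee_range) negotiation above as standalone logic (hypothetical helper,
// not this crate's API, and omitting the range-overlap checks done earlier): the non-funder never
// pays the closing fee, so it simply proposes the largest fee both ranges allow, while the funder
// accepts any counter-proposal inside the range it previously advertised:
//
//     fn pick_fee(we_fund: bool, proposed: u64, their_max: u64,
//                 our_min: u64, our_max: u64) -> Result<u64, &'static str> {
//         if we_fund {
//             if proposed >= our_min && proposed <= our_max { Ok(proposed) }
//             else { Err("peer proposed a fee outside the range we offered") }
//         } else {
//             Ok(core::cmp::min(their_max, our_max))
//         }
//     }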
5065 fn internal_htlc_satisfies_config(
5066 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig,
5067 ) -> Result<(), (&'static str, u16)> {
5068 let fee = amt_to_forward.checked_mul(config.forwarding_fee_proportional_millionths as u64)
5069 .and_then(|prop_fee| (prop_fee / 1000000).checked_add(config.forwarding_fee_base_msat as u64));
5070 if fee.is_none() || htlc.amount_msat < fee.unwrap() ||
5071 (htlc.amount_msat - fee.unwrap()) < amt_to_forward {
5073 "Prior hop has deviated from specified fees parameters or origin node has obsolete ones",
5074 0x1000 | 12, // fee_insufficient
5077 if (htlc.cltv_expiry as u64) < outgoing_cltv_value as u64 + config.cltv_expiry_delta as u64 {
5079 "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
5080 0x1000 | 13, // incorrect_cltv_expiry
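// The fee check above, as a standalone arithmetic sketch (hypothetical helper, not this crate's
// API). The expected forwarding fee is the base fee plus a proportional part expressed in
// millionths of the forwarded amount, computed with overflow checks:
//
//     fn expected_fee_msat(amt_to_forward_msat: u64, base_msat: u64, prop_millionths: u64) -> Option<u64> {
//         amt_to_forward_msat.checked_mul(prop_millionths)
//             .map(|p| p / 1_000_000)
//             .and_then(|prop| prop.checked_add(base_msat))
//     }
//     // e.g. forwarding 1_000_000 msat with base 1_000 msat and 100 ppm:
//     // 1_000 + 1_000_000 * 100 / 1_000_000 = 1_100 msat. An incoming HTLC must carry at least
//     // amt_to_forward plus that fee or it is failed with fee_insufficient (0x1000 | 12).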
5086 /// Determines whether the parameters of an incoming HTLC to be forwarded satisfy the channel's
5087 /// [`ChannelConfig`]. This first looks at the channel's current [`ChannelConfig`], and if
5088 /// unsuccessful, falls back to the previous one if one exists.
5089 pub fn htlc_satisfies_config(
5090 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32,
5091 ) -> Result<(), (&'static str, u16)> {
5092 self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.context.config())
5094 if let Some(prev_config) = self.context.prev_config() {
5095 self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &prev_config)
5102 pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
5103 self.context.cur_holder_commitment_transaction_number + 1
5106 pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
5107 self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state.is_awaiting_remote_revoke() { 1 } else { 0 }
5110 pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
5111 self.context.cur_counterparty_commitment_transaction_number + 2
5115 pub fn get_signer(&self) -> &ChannelSignerType<SP> {
5116 &self.context.holder_signer
5120 pub fn get_value_stat(&self) -> ChannelValueStat {
5122 value_to_self_msat: self.context.value_to_self_msat,
5123 channel_value_msat: self.context.channel_value_satoshis * 1000,
5124 channel_reserve_msat: self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000,
5125 pending_outbound_htlcs_amount_msat: self.context.pending_outbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
5126 pending_inbound_htlcs_amount_msat: self.context.pending_inbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
5127 holding_cell_outbound_amount_msat: {
5129 for h in self.context.holding_cell_htlc_updates.iter() {
5131 &HTLCUpdateAwaitingACK::AddHTLC{amount_msat, .. } => {
5139 counterparty_max_htlc_value_in_flight_msat: self.context.counterparty_max_htlc_value_in_flight_msat,
5140 counterparty_dust_limit_msat: self.context.counterparty_dust_limit_satoshis * 1000,
5144 /// Returns true if this channel has been marked as awaiting a monitor update to move forward.
5145 /// Allowed in any state (including after shutdown)
5146 pub fn is_awaiting_monitor_update(&self) -> bool {
5147 self.context.channel_state.is_monitor_update_in_progress()
5150 /// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
5151 pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 {
5152 if self.context.blocked_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
5153 self.context.blocked_monitor_updates[0].update.update_id - 1
5156 /// Returns the next blocked monitor update, if one exists, and a bool which indicates a
5157 /// further blocked monitor update exists after the next.
5158 pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(ChannelMonitorUpdate, bool)> {
5159 if self.context.blocked_monitor_updates.is_empty() { return None; }
5160 Some((self.context.blocked_monitor_updates.remove(0).update,
5161 !self.context.blocked_monitor_updates.is_empty()))
5164 /// Pushes a new monitor update into our monitor update queue, returning it if it should be
5165 /// immediately given to the user for persisting or `None` if it should be held as blocked.
5166 fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
5167 -> Option<ChannelMonitorUpdate> {
5168 let release_monitor = self.context.blocked_monitor_updates.is_empty();
5169 if !release_monitor {
5170 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
5179 pub fn blocked_monitor_updates_pending(&self) -> usize {
5180 self.context.blocked_monitor_updates.len()
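// The queue semantics above, as a standalone sketch (hypothetical types, not this crate's API):
// updates are released to the persister only while nothing earlier is still blocked, otherwise
// they queue in order, and the newest *unblocked* id is therefore one less than the first queued id:
//
//     struct MonQueue { latest_id: u64, blocked: Vec<u64> }
//     impl MonQueue {
//         // Assign an id to a new update; return it only if it may be persisted immediately.
//         fn push(&mut self) -> Option<u64> {
//             self.latest_id += 1;
//             if self.blocked.is_empty() { Some(self.latest_id) }
//             else { self.blocked.push(self.latest_id); None }
//         }
//         // The newest update id that has actually been released.
//         fn latest_unblocked(&self) -> u64 {
//             if self.blocked.is_empty() { self.latest_id } else { self.blocked[0] - 1 }
//         }
//         // Pop the next blocked update, and whether more remain after it.
//         fn unblock_next(&mut self) -> Option<(u64, bool)> {
//             if self.blocked.is_empty() { None }
//             else { Some((self.blocked.remove(0), !self.blocked.is_empty())) }
//         }
//     }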
5183 /// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
5184 /// If the channel is outbound, this implies we have not yet broadcasted the funding
5185 /// transaction. If the channel is inbound, this implies simply that the channel has not advanced state.
5187 pub fn is_awaiting_initial_mon_persist(&self) -> bool {
5188 if !self.is_awaiting_monitor_update() { return false; }
5190 self.context.channel_state, ChannelState::AwaitingChannelReady(flags)
5191 if (flags & !(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY | FundedStateFlags::PEER_DISCONNECTED | FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS | AwaitingChannelReadyFlags::WAITING_FOR_BATCH)).is_empty()
5193 // If we're not a 0conf channel, we'll be waiting on a monitor update with only
5194 // AwaitingChannelReady set, though our peer could have sent their channel_ready.
5195 debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0);
5198 if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
5199 self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
5200 // If we're a 0-conf channel, we'll move beyond AwaitingChannelReady immediately even while
5201 // waiting for the initial monitor persistence. Thus, we check if our commitment
5202 // transaction numbers have both been iterated only exactly once (for the
5203 // funding_signed), and we're awaiting monitor update.
5205 // If we got here, we shouldn't have yet broadcasted the funding transaction (as the
5206 // only way to get an awaiting-monitor-update state during initial funding is if the
5207 // initial monitor persistence is still pending).
5209 // Because deciding we're awaiting initial broadcast spuriously could result in
5210 // funds-loss (as we don't have a monitor, but have the funding transaction confirmed),
5211 // we hard-assert here, even in production builds.
5212 if self.context.is_outbound() { assert!(self.context.funding_transaction.is_some()); }
5213 assert!(self.context.monitor_pending_channel_ready);
5214 assert_eq!(self.context.latest_monitor_update_id, 0);
5220 /// Returns true if our channel_ready has been sent
5221 pub fn is_our_channel_ready(&self) -> bool {
5222 matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY)) ||
5223 matches!(self.context.channel_state, ChannelState::ChannelReady(_))
5226 /// Returns true if our peer has either initiated or agreed to shut down the channel.
5227 pub fn received_shutdown(&self) -> bool {
5228 self.context.channel_state.is_remote_shutdown_sent()
5231 /// Returns true if we either initiated or agreed to shut down the channel.
5232 pub fn sent_shutdown(&self) -> bool {
5233 self.context.channel_state.is_local_shutdown_sent()
5236 /// Returns true if this channel is fully shut down. True here implies that no further actions
5237 /// may/will be taken on this channel, and thus this object should be freed. Any future changes
5238 /// will be handled appropriately by the chain monitor.
5239 pub fn is_shutdown(&self) -> bool {
5240 matches!(self.context.channel_state, ChannelState::ShutdownComplete)
5243 pub fn channel_update_status(&self) -> ChannelUpdateStatus {
5244 self.context.channel_update_status
5247 pub fn set_channel_update_status(&mut self, status: ChannelUpdateStatus) {
5248 self.context.update_time_counter += 1;
5249 self.context.channel_update_status = status;
5252 fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
5254 // * always when a new block/transactions are confirmed with the new height
5255 // * when funding is signed with a height of 0
5256 if self.context.funding_tx_confirmation_height == 0 && self.context.minimum_depth != Some(0) {
5260 let funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
5261 if funding_tx_confirmations <= 0 {
5262 self.context.funding_tx_confirmation_height = 0;
5265 if funding_tx_confirmations < self.context.minimum_depth.unwrap_or(0) as i64 {
5269 // If we're still pending the signature on a funding transaction, then we're not ready to send a
5270 // channel_ready yet.
5271 if self.context.signer_pending_funding {
5275 // Note that we don't include ChannelState::WaitingForBatch as we don't want to send
5276 // channel_ready until the entire batch is ready.
5277 let need_commitment_update = if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if (f & !FundedStateFlags::ALL).is_empty()) {
5278 self.context.channel_state.set_our_channel_ready();
5280 } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f & !FundedStateFlags::ALL == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY) {
5281 self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
5282 self.context.update_time_counter += 1;
5284 } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f & !FundedStateFlags::ALL == AwaitingChannelReadyFlags::OUR_CHANNEL_READY) {
5285 // We got a reorg but not enough to trigger a force close, just ignore.
5288 if self.context.funding_tx_confirmation_height != 0 &&
5289 self.context.channel_state < ChannelState::ChannelReady(ChannelReadyFlags::new())
5291 // We should never see a funding transaction on-chain until we've received
5292 // funding_signed (if we're an outbound channel), or seen funding_generated (if we're
5293 // an inbound channel - before that we have no known funding TXID). The fuzzer,
5294 // however, may do this and we shouldn't treat it as a bug.
5295 #[cfg(not(fuzzing))]
5296 panic!("Started confirming a channel in a state pre-AwaitingChannelReady: {}.\n\
5297 Do NOT broadcast a funding transaction manually - let LDK do it for you!",
5298 self.context.channel_state.to_u32());
5300 // We got a reorg but not enough to trigger a force close, just ignore.
5304 if need_commitment_update {
5305 if !self.context.channel_state.is_monitor_update_in_progress() {
5306 if !self.context.channel_state.is_peer_disconnected() {
5307 let next_per_commitment_point =
5308 self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
5309 return Some(msgs::ChannelReady {
5310 channel_id: self.context.channel_id,
5311 next_per_commitment_point,
5312 short_channel_id_alias: Some(self.context.outbound_scid_alias),
5316 self.context.monitor_pending_channel_ready = true;
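// The confirmation arithmetic near the top of this function, as a standalone sketch (ignoring the
// 0-conf special case handled separately above): the block that confirms the funding transaction
// counts as confirmation number one, so the channel is deep enough once
// `height - conf_height + 1 >= minimum_depth`, and a reorg below the confirmation height makes the
// count non-positive, resetting it:
//
//     fn is_deep_enough(height: u32, conf_height: u32, minimum_depth: u32) -> bool {
//         let confs = height as i64 - conf_height as i64 + 1;
//         confs > 0 && confs >= minimum_depth as i64
//     }
//     // e.g. conf_height = 100, minimum_depth = 3: deep enough at height 102 (3 confirmations).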
5322 /// When a transaction is confirmed, we check whether it is or spends the funding transaction.
5323 /// In the first case, we store the confirmation height and calculate the short channel id.
5324 /// In the second, we simply return an Err indicating we need to be force-closed now.
5325 pub fn transactions_confirmed<NS: Deref, L: Deref>(
5326 &mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData,
5327 chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L
5328 ) -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5330 NS::Target: NodeSigner,
5333 let mut msgs = (None, None);
5334 if let Some(funding_txo) = self.context.get_funding_txo() {
5335 for &(index_in_block, tx) in txdata.iter() {
5336 // Check if the transaction is the expected funding transaction, and if it is,
5337 // check that it pays the right amount to the right script.
5338 if self.context.funding_tx_confirmation_height == 0 {
5339 if tx.txid() == funding_txo.txid {
5340 let txo_idx = funding_txo.index as usize;
5341 if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_v0_p2wsh() ||
5342 tx.output[txo_idx].value != self.context.channel_value_satoshis {
5343 if self.context.is_outbound() {
5344 // If we generated the funding transaction and it doesn't match what it
5345 // should, the client is really broken and we should just panic and
5346 // tell them off. That said, because hash collisions happen with high
5347 // probability in fuzzing mode, if we're fuzzing we just close the
5348 // channel and move on.
5349 #[cfg(not(fuzzing))]
5350 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
5352 self.context.update_time_counter += 1;
5353 let err_reason = "funding tx had wrong script/value or output index";
5354 return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() });
5356 if self.context.is_outbound() {
5357 if !tx.is_coin_base() {
5358 for input in tx.input.iter() {
5359 if input.witness.is_empty() {
5360 // We generated a malleable funding transaction, implying we've
5361 // just exposed ourselves to funds loss to our counterparty.
5362 #[cfg(not(fuzzing))]
5363 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
5368 self.context.funding_tx_confirmation_height = height;
5369 self.context.funding_tx_confirmed_in = Some(*block_hash);
5370 self.context.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
5371 Ok(scid) => Some(scid),
5372 Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
5375 // If this is a coinbase transaction and not a 0-conf channel,
5376 // we should update our min_depth to 100 to handle coinbase maturity
5377 if tx.is_coin_base() &&
5378 self.context.minimum_depth.unwrap_or(0) > 0 &&
5379 self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
5380 self.context.minimum_depth = Some(COINBASE_MATURITY);
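// As a standalone sketch of the BOLT 7 short_channel_id packing performed by the
// `scid_from_parts` call above (hypothetical helper, not this crate's API): three bytes of block
// height, three bytes of transaction index within the block, and two bytes of output index, which
// is where the "16 million" / "65k outputs" limits in the panic message come from:
//
//     fn pack_scid(block: u64, tx_index: u64, vout: u64) -> Option<u64> {
//         if block > 0x00ff_ffff || tx_index > 0x00ff_ffff || vout > 0xffff { return None; }
//         Some((block << 40) | (tx_index << 16) | vout)
//     }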
5383 // If we allow 1-conf funding, we may need to check for channel_ready here and
5384 // send it immediately instead of waiting for a best_block_updated call (which
5385 // may have already happened for this block).
5386 if let Some(channel_ready) = self.check_get_channel_ready(height) {
5387 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
5388 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger);
5389 msgs = (Some(channel_ready), announcement_sigs);
5392 for inp in tx.input.iter() {
5393 if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
5394 log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, &self.context.channel_id());
5395 return Err(ClosureReason::CommitmentTxConfirmed);
5403 /// When a new block is connected, we check the height of the block against outbound holding
5404 /// cell HTLCs in case we need to give up on them prematurely and time them out. Everything
5405 /// else (e.g. commitment transaction broadcasts, HTLC transaction broadcasting, etc) is
5406 /// handled by the ChannelMonitor.
5408 /// If we return Err, the channel may have been closed, at which point the standard
5409 /// requirements apply - no calls may be made except those explicitly stated to be allowed post-shutdown.
5412 /// May return some HTLCs (and their payment_hash) which have timed out and should be failed back.
5414 pub fn best_block_updated<NS: Deref, L: Deref>(
5415 &mut self, height: u32, highest_header_time: u32, chain_hash: ChainHash,
5416 node_signer: &NS, user_config: &UserConfig, logger: &L
5417 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5419 NS::Target: NodeSigner,
5422 self.do_best_block_updated(height, highest_header_time, Some((chain_hash, node_signer, user_config)), logger)
5425 fn do_best_block_updated<NS: Deref, L: Deref>(
5426 &mut self, height: u32, highest_header_time: u32,
5427 chain_node_signer: Option<(ChainHash, &NS, &UserConfig)>, logger: &L
5428 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5430 NS::Target: NodeSigner,
5433 let mut timed_out_htlcs = Vec::new();
5434 // This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
5435 // forward an HTLC when our counterparty should almost certainly just fail it for expiring
5437 let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
5438 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
5440 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
5441 if *cltv_expiry <= unforwarded_htlc_cltv_limit {
5442 timed_out_htlcs.push((source.clone(), payment_hash.clone()));
5450 self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);
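// The holding-cell timeout applied in the `retain` above, as a standalone sketch: an HTLC still
// sitting in the holding cell is abandoned (and failed back) once its expiry is within the grace
// period of the current height, since the counterparty would almost certainly fail it anyway:
//
//     fn still_forwardable(cltv_expiry: u32, height: u32, grace_blocks: u32) -> bool {
//         cltv_expiry > height + grace_blocks
//     }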
5452 if let Some(channel_ready) = self.check_get_channel_ready(height) {
5453 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
5454 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
5456 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
5457 return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
5460 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
5461 self.context.channel_state.is_our_channel_ready() {
5462 let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
5463 if self.context.funding_tx_confirmation_height == 0 {
5464 // Note that check_get_channel_ready may reset funding_tx_confirmation_height to
5465 // zero if it has been reorged out, however in either case, our state flags
5466 // indicate we've already sent a channel_ready
5467 funding_tx_confirmations = 0;
5470 // If we've sent channel_ready (or have both sent and received channel_ready), and
5471 // the funding transaction has become unconfirmed,
5472 // close the channel and hope we can get the latest state on chain (because presumably
5473 // the funding transaction is at least still in the mempool of most nodes).
5475 // Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
5476 // 0-conf channel, but not doing so may lead to the
5477 // `ChannelManager::short_to_chan_info` map being inconsistent, so we currently have to.
5479 if funding_tx_confirmations == 0 && self.context.funding_tx_confirmed_in.is_some() {
5480 let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
5481 self.context.minimum_depth.unwrap(), funding_tx_confirmations);
5482 return Err(ClosureReason::ProcessingError { err: err_reason });
5484 } else if !self.context.is_outbound() && self.context.funding_tx_confirmed_in.is_none() &&
5485 height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
5486 log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id);
5487 // If funding_tx_confirmed_in is unset, the channel must not be active
5488 assert!(self.context.channel_state <= ChannelState::ChannelReady(ChannelReadyFlags::new()));
5489 assert!(!self.context.channel_state.is_our_channel_ready());
5490 return Err(ClosureReason::FundingTimedOut);
5493 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
5494 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
5496 Ok((None, timed_out_htlcs, announcement_sigs))
5499 /// Indicates the funding transaction is no longer confirmed in the main chain. This may
5500 /// force-close the channel, but may also indicate a harmless reorganization of a block or two
5501 /// before the channel has reached channel_ready and we can just wait for more blocks.
5502 pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
5503 if self.context.funding_tx_confirmation_height != 0 {
5504 // We handle the funding disconnection by calling best_block_updated with a height one
5505 // below where our funding was connected, implying a reorg back to conf_height - 1.
5506 let reorg_height = self.context.funding_tx_confirmation_height - 1;
5507 // We use the time field to bump the current time we set on channel updates if it's
5508 // larger. If we don't know that time has moved forward, we can just set it to the last
5509 // time we saw and it will be ignored.
5510 let best_time = self.context.update_time_counter;
5511 match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&dyn NodeSigner, &UserConfig)>, logger) {
5512 Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
5513 assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?");
5514 assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
5515 assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?");
5521 // We never learned about the funding confirmation anyway, just ignore
5526 // Methods to get unprompted messages to send to the remote end (or where we already returned
5527 // something in the handler for the message that prompted this message):
5529 /// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
5530 /// announceable and available for use (have exchanged [`ChannelReady`] messages in both
5531 /// directions). Should be used for both broadcasted announcements and in response to an
5532 /// AnnouncementSignatures message from the remote peer.
5534 /// Will only fail if we're not in a state where channel_announcement may be sent (including closing).
5537 /// This will only return ChannelError::Ignore upon failure.
5539 /// [`ChannelReady`]: crate::ln::msgs::ChannelReady
5540 fn get_channel_announcement<NS: Deref>(
5541 &self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
5542 ) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5543 if !self.context.config.announced_channel {
5544 return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
5546 if !self.context.is_usable() {
5547 return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
5550 let short_channel_id = self.context.get_short_channel_id()
5551 .ok_or(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel has not been confirmed yet".to_owned()))?;
5552 let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5553 .map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
5554 let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
5555 let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();
5557 let msg = msgs::UnsignedChannelAnnouncement {
5558 features: channelmanager::provided_channel_features(&user_config),
5561 node_id_1: if were_node_one { node_id } else { counterparty_node_id },
5562 node_id_2: if were_node_one { counterparty_node_id } else { node_id },
5563 bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }),
5564 bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }),
5565 excess_data: Vec::new(),
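// Field ordering above follows BOLT 7: `node_id_1` must be the lexicographically lesser of the two
// node ids, and the bitcoin (funding) keys are listed in the same node order. A standalone sketch
// (not this crate's API) of the ordering decision:
//
//     fn order_ids(ours: [u8; 33], theirs: [u8; 33]) -> ([u8; 33], [u8; 33]) {
//         if ours[..] < theirs[..] { (ours, theirs) } else { (theirs, ours) }
//     }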
5571 fn get_announcement_sigs<NS: Deref, L: Deref>(
5572 &mut self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
5573 best_block_height: u32, logger: &L
5574 ) -> Option<msgs::AnnouncementSignatures>
5576 NS::Target: NodeSigner,
5579 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5583 if !self.context.is_usable() {
5587 if self.context.channel_state.is_peer_disconnected() {
5588 log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
5592 if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent {
5596 log_trace!(logger, "Creating an announcement_signatures message for channel {}", &self.context.channel_id());
5597 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
5600 log_trace!(logger, "{:?}", e);
5604 let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) {
5606 log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!");
5611 match &self.context.holder_signer {
5612 ChannelSignerType::Ecdsa(ecdsa) => {
5613 let our_bitcoin_sig = match ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) {
5615 log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!");
5620 let short_channel_id = match self.context.get_short_channel_id() {
5622 None => return None,
5625 self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;
5627 Some(msgs::AnnouncementSignatures {
5628 channel_id: self.context.channel_id(),
5630 node_signature: our_node_sig,
5631 bitcoin_signature: our_bitcoin_sig,
5634 // TODO (taproot|arik)
5640 /// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are available.
5642 fn sign_channel_announcement<NS: Deref>(
5643 &self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement
5644 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5645 if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs {
5646 let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5647 .map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?);
5648 let were_node_one = announcement.node_id_1 == our_node_key;
5650 let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement))
5651 .map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?;
5652 match &self.context.holder_signer {
5653 ChannelSignerType::Ecdsa(ecdsa) => {
5654 let our_bitcoin_sig = ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx)
5655 .map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
5656 Ok(msgs::ChannelAnnouncement {
5657 node_signature_1: if were_node_one { our_node_sig } else { their_node_sig },
5658 node_signature_2: if were_node_one { their_node_sig } else { our_node_sig },
5659 bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig },
5660 bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
5661 contents: announcement,
5664 // TODO (taproot|arik)
5669 Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
5673 /// Processes an incoming announcement_signatures message, providing a fully-signed
5674 /// channel_announcement message which we can broadcast and storing our counterparty's
5675 /// signatures for later reconstruction/rebroadcast of the channel_announcement.
5676 pub fn announcement_signatures<NS: Deref>(
5677 &mut self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32,
5678 msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
5679 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5680 let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;
5682 let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);
5684 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
5685 return Err(ChannelError::Close(format!(
5686 "Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
5687 &announcement, self.context.get_counterparty_node_id())));
5689 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
5690 return Err(ChannelError::Close(format!(
5691 "Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
5692 &announcement, self.context.counterparty_funding_pubkey())));
5695 self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature));
5696 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5697 return Err(ChannelError::Ignore(
5698 "Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned()));
5701 self.sign_channel_announcement(node_signer, announcement)
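// The height check above encodes "fewer than six confirmations" without a subtraction that could
// underflow: with the confirming block counted as the first confirmation,
// `conf_height + 5 <= best_block_height` holds exactly when
// `best_block_height - conf_height + 1 >= 6`. As a standalone sketch (not this crate's API):
//
//     fn has_six_confs(conf_height: u32, best_block_height: u32) -> bool {
//         conf_height != 0 && conf_height as u64 + 5 <= best_block_height as u64
//     }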
5704 /// Gets a signed channel_announcement for this channel, if we previously received an
5705 /// announcement_signatures from our counterparty.
5706 pub fn get_signed_channel_announcement<NS: Deref>(
5707 &self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32, user_config: &UserConfig
5708 ) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
5709 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5712 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
5714 Err(_) => return None,
5716 match self.sign_channel_announcement(node_signer, announcement) {
5717 Ok(res) => Some(res),
5722 /// May panic if called on a channel that wasn't immediately-previously
5723 /// self.remove_uncommitted_htlcs_and_mark_paused()'d
5724 pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
5725 assert!(self.context.channel_state.is_peer_disconnected());
5726 assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
5727 // Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
5728 // current to_remote balances. However, it no longer has any use, and thus is now simply
5729 // set to a dummy (but valid, as required by the spec) public key.
5730 // fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey"
5731 // branches, but we unwrap it below, so we arbitrarily select a dummy pubkey which is both
5732 // valid, and valid in fuzzing mode's arbitrary validity criteria:
5733 let mut pk = [2; 33]; pk[1] = 0xff;
5734 let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
5735 let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
5736 let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
5737 log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), &self.context.channel_id());
5740 log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", &self.context.channel_id());
5743 self.mark_awaiting_response();
5744 msgs::ChannelReestablish {
5745 channel_id: self.context.channel_id(),
5746 // The protocol has two different commitment number concepts - the "commitment
5747 // transaction number", which starts from 0 and counts up, and the "revocation key
5748 // index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
5749 // commitment transaction numbers by the index which will be used to reveal the
5750 // revocation key for that commitment transaction, which means we have to convert them
5751 // to protocol-level commitment numbers here...
5753 // next_local_commitment_number is the next commitment_signed number we expect to
5754 // receive (indicating if they need to resend one that we missed).
5755 next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
5756 // We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
5757 // receive, however we track it by the next commitment number for a remote transaction
5758 // (which is one further, as they always revoke previous commitment transaction, not
5759 // the one we send) so we have to decrement by 1. Note that if
5760 // cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
5761 // dropped this channel on disconnect as it hasn't yet reached AwaitingChannelReady so we can't
5763 next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
5764 your_last_per_commitment_secret: remote_last_secret,
5765 my_current_per_commitment_point: dummy_pubkey,
5766 // TODO(dual_funding): If we've sent `commitment_signed` for an interactive transaction
5767 // construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
5768 // txid of that interactive transaction, else we MUST NOT set it.
5769 next_funding_txid: None,
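// Converting between the two numbering schemes described above, as a standalone sketch:
// internally, commitments are tracked by the revocation index, which starts at
// INITIAL_COMMITMENT_NUMBER (2^48 - 1) and counts down, while the wire protocol counts commitment
// numbers up from zero, so the translation is a simple subtraction:
//
//     const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
//     fn protocol_commitment_number(internal_index: u64) -> u64 {
//         INITIAL_COMMITMENT_NUMBER - internal_index
//     }
//     // e.g. the very first commitment uses internal index 2^48 - 1, i.e. protocol number 0;
//     // after one update the internal index is 2^48 - 2, i.e. protocol number 1.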
5774 // Send stuff to our remote peers:
5776 /// Queues up an outbound HTLC to send by placing it in the holding cell. You should call
5777 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
5778 /// commitment update.
5780 /// `Err`s will only be [`ChannelError::Ignore`].
5781 pub fn queue_add_htlc<F: Deref, L: Deref>(
5782 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
5783 onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
5784 blinding_point: Option<PublicKey>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5785 ) -> Result<(), ChannelError>
5786 where F::Target: FeeEstimator, L::Target: Logger
5789 .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true,
5790 skimmed_fee_msat, blinding_point, fee_estimator, logger)
5791 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
5793 if let ChannelError::Ignore(_) = err { /* fine */ }
5794 else { debug_assert!(false, "Queueing cannot trigger channel failure"); }
5799 /// Adds a pending outbound HTLC to this channel, note that you probably want
5800 /// [`Self::send_htlc_and_commit`] instead, as you'll want both messages at once.
5802 /// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on the wire:
5804 /// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
5805 /// wouldn't be able to determine what they actually ACK'ed if we have two sets of updates in flight.
5807 /// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as
5808 /// we may not yet have sent the previous commitment update messages and will need to
5809 /// regenerate them.
5811 /// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods
5812 /// on this [`Channel`] if `force_holding_cell` is false.
5814 /// `Err`s will only be [`ChannelError::Ignore`].
5815 fn send_htlc<F: Deref, L: Deref>(
5816 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
5817 onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool,
5818 skimmed_fee_msat: Option<u64>, blinding_point: Option<PublicKey>,
5819 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5820 ) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
5821 where F::Target: FeeEstimator, L::Target: Logger
5823 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
5824 self.context.channel_state.is_local_shutdown_sent() ||
5825 self.context.channel_state.is_remote_shutdown_sent()
5827 return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
5829 let channel_total_msat = self.context.channel_value_satoshis * 1000;
5830 if amount_msat > channel_total_msat {
5831 return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat)));
5834 if amount_msat == 0 {
5835 return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
5838 let available_balances = self.context.get_available_balances(fee_estimator);
5839 if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
5840 return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
5841 available_balances.next_outbound_htlc_minimum_msat)));
5844 if amount_msat > available_balances.next_outbound_htlc_limit_msat {
5845 return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
5846 available_balances.next_outbound_htlc_limit_msat)));
5849 if self.context.channel_state.is_peer_disconnected() {
5850 // Note that this should never really happen: if we're !is_live(), receipt of an
5851 // incoming HTLC for relay will result in us rejecting the HTLC, and we won't allow
5852 // the user to send directly into a !is_live() channel. However, if we
5853 // disconnected during the time the previous hop was doing the commitment dance we may
5854 // end up getting here after the forwarding delay. In any case, returning an
5855 // IgnoreError will get ChannelManager to do the right thing and fail backwards now.
5856 return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
5859 let need_holding_cell = self.context.channel_state.should_force_holding_cell();
5860 log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}",
5861 payment_hash, amount_msat,
5862 if force_holding_cell { "into holding cell" }
5863 else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
5864 else { "to peer" });
5866 if need_holding_cell {
5867 force_holding_cell = true;
5870 // Now update local state:
5871 if force_holding_cell {
5872 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
5877 onion_routing_packet,
5884 self.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
5885 htlc_id: self.context.next_holder_htlc_id,
5887 payment_hash: payment_hash.clone(),
5889 state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
5895 let res = msgs::UpdateAddHTLC {
5896 channel_id: self.context.channel_id,
5897 htlc_id: self.context.next_holder_htlc_id,
5901 onion_routing_packet,
5905 self.context.next_holder_htlc_id += 1;
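// Summarizing the branch above as a standalone sketch: an HTLC either goes straight into
// `pending_outbound_htlcs` (and an update_add_htlc is returned for the wire), or it is parked in
// the holding cell when the caller asked for that, or the channel state forces it (awaiting a
// revoke_and_ack, or a monitor update in progress):
//
//     fn should_hold(force_holding_cell: bool, awaiting_raa: bool, monitor_in_progress: bool) -> bool {
//         force_holding_cell || awaiting_raa || monitor_in_progress
//     }
//     // When held, no update_add_htlc is returned now; one is generated later when the holding
//     // cell is freed.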
5910 fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
5911 log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
5912 // We can upgrade the status of some HTLCs that are waiting on a commitment: even if we
5913 // fail to generate this, we are still at least at a position where upgrading their status is acceptable.
5915 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
5916 let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state {
5917 Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
5919 if let Some(state) = new_state {
5920 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
5924 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
5925 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
5926 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
5927 // Grab the preimage, if it exists, instead of cloning
5928 let mut reason = OutboundHTLCOutcome::Success(None);
5929 mem::swap(outcome, &mut reason);
5930 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
5933 if let Some((feerate, update_state)) = self.context.pending_update_fee {
5934 if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
5935 debug_assert!(!self.context.is_outbound());
5936 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
5937 self.context.feerate_per_kw = feerate;
5938 self.context.pending_update_fee = None;
5941 self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst;
5943 let (mut htlcs_ref, counterparty_commitment_tx) =
5944 self.build_commitment_no_state_update(logger);
5945 let counterparty_commitment_txid = counterparty_commitment_tx.trust().txid();
5946 let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
5947 htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();
5949 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent {
5950 self.context.announcement_sigs_state = AnnouncementSigsState::Committed;
5953 self.context.latest_monitor_update_id += 1;
5954 let monitor_update = ChannelMonitorUpdate {
5955 update_id: self.context.latest_monitor_update_id,
5956 counterparty_node_id: Some(self.context.counterparty_node_id),
5957 updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
5958 commitment_txid: counterparty_commitment_txid,
5959 htlc_outputs: htlcs.clone(),
5960 commitment_number: self.context.cur_counterparty_commitment_transaction_number,
5961 their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap(),
5962 feerate_per_kw: Some(counterparty_commitment_tx.feerate_per_kw()),
5963 to_broadcaster_value_sat: Some(counterparty_commitment_tx.to_broadcaster_value_sat()),
5964 to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()),
5967 self.context.channel_state.set_awaiting_remote_revoke();
5971 fn build_commitment_no_state_update<L: Deref>(&self, logger: &L)
5972 -> (Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>, CommitmentTransaction)
5973 where L::Target: Logger
5975 let counterparty_keys = self.context.build_remote_transaction_keys();
5976 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
5977 let counterparty_commitment_tx = commitment_stats.tx;
5979 #[cfg(any(test, fuzzing))]
5981 if !self.context.is_outbound() {
5982 let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
5983 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
5984 if let Some(info) = projected_commit_tx_info {
5985 let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len();
5986 if info.total_pending_htlcs == total_pending_htlcs
5987 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
5988 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
5989 && info.feerate == self.context.feerate_per_kw {
5990 let actual_fee = commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.get_channel_type());
5991 assert_eq!(actual_fee, info.fee);
5997 (commitment_stats.htlcs_included, counterparty_commitment_tx)
6000 /// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
6001 /// generation when we shouldn't change HTLC/channel state.
6002 fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
6003 // Get the fee tests from `build_commitment_no_state_update`
6004 #[cfg(any(test, fuzzing))]
6005 self.build_commitment_no_state_update(logger);
6007 let counterparty_keys = self.context.build_remote_transaction_keys();
6008 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
6009 let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
6011 match &self.context.holder_signer {
6012 ChannelSignerType::Ecdsa(ecdsa) => {
6013 let (signature, htlc_signatures);
6016 let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
6017 for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
6021 let res = ecdsa.sign_counterparty_commitment(
6022 &commitment_stats.tx,
6023 commitment_stats.inbound_htlc_preimages,
6024 commitment_stats.outbound_htlc_preimages,
6025 &self.context.secp_ctx,
6026 ).map_err(|_| ChannelError::Ignore("Failed to get signatures for new commitment_signed".to_owned()))?;
6028 htlc_signatures = res.1;
6030 log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
6031 encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
6032 &counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
6033 log_bytes!(signature.serialize_compact()[..]), &self.context.channel_id());
6035 for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
6036 log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
6037 encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, &self.context.channel_type, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
6038 encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &counterparty_keys)),
6039 log_bytes!(counterparty_keys.broadcaster_htlc_key.to_public_key().serialize()),
6040 log_bytes!(htlc_sig.serialize_compact()[..]), &self.context.channel_id());
6044 Ok((msgs::CommitmentSigned {
6045 channel_id: self.context.channel_id,
6049 partial_signature_with_nonce: None,
6050 }, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
6052 // TODO (taproot|arik)
6058 /// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
6059 /// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
6061 /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
6062 /// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
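///
/// A minimal calling sketch (variable names are illustrative; in practice the `ChannelManager`
/// drives this path):
///
/// ```ignore
/// let monitor_update_opt = chan.send_htlc_and_commit(
///     10_000, payment_hash, cltv_expiry, htlc_source, onion_packet,
///     None, // no skimmed fee
///     &fee_estimator, &logger,
/// )?;
/// // `Some(_)` means a new remote commitment was built and the returned
/// // `ChannelMonitorUpdate` must be persisted before proceeding.
/// ```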
6063 pub fn send_htlc_and_commit<F: Deref, L: Deref>(
6064 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32,
6065 source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
6066 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
6067 ) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
6068 where F::Target: FeeEstimator, L::Target: Logger
6070 let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
6071 onion_routing_packet, false, skimmed_fee_msat, None, fee_estimator, logger);
6072 if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
6075 let monitor_update = self.build_commitment_no_status_check(logger);
6076 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
6077 Ok(self.push_ret_blockable_mon_update(monitor_update))
6083 /// Applies the `ChannelUpdate` and returns a boolean indicating whether a change actually happened.
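///
/// Illustrative use (names are hypothetical):
///
/// ```ignore
/// if chan.channel_update(&counterparty_channel_update)? {
///     // Our cached view of the counterparty's forwarding parameters changed.
/// }
/// ```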
6085 pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<bool, ChannelError> {
6086 let new_forwarding_info = Some(CounterpartyForwardingInfo {
6087 fee_base_msat: msg.contents.fee_base_msat,
6088 fee_proportional_millionths: msg.contents.fee_proportional_millionths,
6089 cltv_expiry_delta: msg.contents.cltv_expiry_delta
6091 let did_change = self.context.counterparty_forwarding_info != new_forwarding_info;
6093 self.context.counterparty_forwarding_info = new_forwarding_info;
6099 /// Begins the shutdown process, getting a message for the remote peer and returning all
6100 /// holding cell HTLCs for payment failure.
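///
/// Rough calling sketch (illustrative names; normally reached via `ChannelManager::close_channel`):
///
/// ```ignore
/// let (shutdown_msg, monitor_update_opt, dropped_htlcs) =
///     chan.get_shutdown(&signer_provider, &peer_init_features, None, None)?;
/// // Send `shutdown_msg` to the peer, persist any monitor update, and fail the
/// // returned holding-cell HTLCs back to their sources.
/// ```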
6101 pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures,
6102 target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
6103 -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
6105 for htlc in self.context.pending_outbound_htlcs.iter() {
6106 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
6107 return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
6110 if self.context.channel_state.is_local_shutdown_sent() {
6111 return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
6113 else if self.context.channel_state.is_remote_shutdown_sent() {
6114 return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
6116 if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
6117 return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
6119 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
6120 if self.context.channel_state.is_peer_disconnected() || self.context.channel_state.is_monitor_update_in_progress() {
6121 return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
6124 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
6127 // use override shutdown script if provided
6128 let shutdown_scriptpubkey = match override_shutdown_script {
6129 Some(script) => script,
6131 // otherwise, use the shutdown scriptpubkey provided by the signer
6132 match signer_provider.get_shutdown_scriptpubkey() {
6133 Ok(scriptpubkey) => scriptpubkey,
6134 Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
6138 if !shutdown_scriptpubkey.is_compatible(their_features) {
6139 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
6141 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
6146 // From here on out, we may not fail!
6147 self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
6148 self.context.channel_state.set_local_shutdown_sent();
6149 self.context.update_time_counter += 1;
6151 let monitor_update = if update_shutdown_script {
6152 self.context.latest_monitor_update_id += 1;
6153 let monitor_update = ChannelMonitorUpdate {
6154 update_id: self.context.latest_monitor_update_id,
6155 counterparty_node_id: Some(self.context.counterparty_node_id),
6156 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
6157 scriptpubkey: self.get_closing_scriptpubkey(),
6160 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
6161 self.push_ret_blockable_mon_update(monitor_update)
6163 let shutdown = msgs::Shutdown {
6164 channel_id: self.context.channel_id,
6165 scriptpubkey: self.get_closing_scriptpubkey(),
6168 // Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
6169 // our shutdown until we've committed all of the pending changes.
6170 self.context.holding_cell_update_fee = None;
6171 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
6172 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
6174 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
6175 dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
6182 debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
6183 "we can't both complete shutdown and return a monitor update");
6185 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
6188 pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
6189 self.context.holding_cell_htlc_updates.iter()
6190 .flat_map(|htlc_update| {
6192 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
6193 => Some((source, payment_hash)),
6197 .chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
6201 /// A not-yet-funded outbound (from holder) channel using V1 channel establishment.
6202 pub(super) struct OutboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
6203 pub context: ChannelContext<SP>,
6204 pub unfunded_context: UnfundedChannelContext,
6207 impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
6208 pub fn new<ES: Deref, F: Deref>(
6209 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
6210 channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
6211 outbound_scid_alias: u64, temporary_channel_id: Option<ChannelId>
6212 ) -> Result<OutboundV1Channel<SP>, APIError>
6213 where ES::Target: EntropySource,
6214 F::Target: FeeEstimator
6216 let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
6217 let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
6218 let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
6219 let pubkeys = holder_signer.pubkeys().clone();
6221 if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
6222 return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
6224 if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
6225 return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
6227 let channel_value_msat = channel_value_satoshis * 1000;
6228 if push_msat > channel_value_msat {
6229 return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
6231 if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
6232 return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk", holder_selected_contest_delay)});
6234 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
6235 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6236 // Protocol-level safety check; this should never happen because of
6237 // `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
6238 return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below implementation limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
6241 let channel_type = Self::get_initial_channel_type(&config, their_features);
6242 debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
6244 let (commitment_conf_target, anchor_outputs_value_msat) = if channel_type.supports_anchors_zero_fee_htlc_tx() {
6245 (ConfirmationTarget::AnchorChannelFee, ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000)
6247 (ConfirmationTarget::NonAnchorChannelFee, 0)
6249 let commitment_feerate = fee_estimator.bounded_sat_per_1000_weight(commitment_conf_target);
6251 let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
6252 let commitment_tx_fee = commit_tx_fee_msat(commitment_feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
6253 if value_to_self_msat.saturating_sub(anchor_outputs_value_msat) < commitment_tx_fee {
6254 return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay the fee for the initial commitment transaction ({}).", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
6257 let mut secp_ctx = Secp256k1::new();
6258 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
6260 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
6261 match signer_provider.get_shutdown_scriptpubkey() {
6262 Ok(scriptpubkey) => Some(scriptpubkey),
6263 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
6267 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
6268 if !shutdown_scriptpubkey.is_compatible(&their_features) {
6269 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
6273 let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
6274 Ok(script) => script,
6275 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
6278 let temporary_channel_id = temporary_channel_id.unwrap_or_else(|| ChannelId::temporary_from_entropy_source(entropy_source));
6281 context: ChannelContext {
6284 config: LegacyChannelConfig {
6285 options: config.channel_config.clone(),
6286 announced_channel: config.channel_handshake_config.announced_channel,
6287 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
6292 inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),
6294 channel_id: temporary_channel_id,
6295 temporary_channel_id: Some(temporary_channel_id),
6296 channel_state: ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT),
6297 announcement_sigs_state: AnnouncementSigsState::NotSent,
6299 channel_value_satoshis,
6301 latest_monitor_update_id: 0,
6303 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
6304 shutdown_scriptpubkey,
6307 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6308 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6311 pending_inbound_htlcs: Vec::new(),
6312 pending_outbound_htlcs: Vec::new(),
6313 holding_cell_htlc_updates: Vec::new(),
6314 pending_update_fee: None,
6315 holding_cell_update_fee: None,
6316 next_holder_htlc_id: 0,
6317 next_counterparty_htlc_id: 0,
6318 update_time_counter: 1,
6320 resend_order: RAACommitmentOrder::CommitmentFirst,
6322 monitor_pending_channel_ready: false,
6323 monitor_pending_revoke_and_ack: false,
6324 monitor_pending_commitment_signed: false,
6325 monitor_pending_forwards: Vec::new(),
6326 monitor_pending_failures: Vec::new(),
6327 monitor_pending_finalized_fulfills: Vec::new(),
6329 signer_pending_commitment_update: false,
6330 signer_pending_funding: false,
6332 #[cfg(debug_assertions)]
6333 holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
6334 #[cfg(debug_assertions)]
6335 counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
6337 last_sent_closing_fee: None,
6338 pending_counterparty_closing_signed: None,
6339 expecting_peer_commitment_signed: false,
6340 closing_fee_limits: None,
6341 target_closing_feerate_sats_per_kw: None,
6343 funding_tx_confirmed_in: None,
6344 funding_tx_confirmation_height: 0,
6345 short_channel_id: None,
6346 channel_creation_height: current_chain_height,
6348 feerate_per_kw: commitment_feerate,
6349 counterparty_dust_limit_satoshis: 0,
6350 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
6351 counterparty_max_htlc_value_in_flight_msat: 0,
6352 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
6353 counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel
6354 holder_selected_channel_reserve_satoshis,
6355 counterparty_htlc_minimum_msat: 0,
6356 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
6357 counterparty_max_accepted_htlcs: 0,
6358 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
6359 minimum_depth: None, // Filled in in accept_channel
6361 counterparty_forwarding_info: None,
6363 channel_transaction_parameters: ChannelTransactionParameters {
6364 holder_pubkeys: pubkeys,
6365 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
6366 is_outbound_from_holder: true,
6367 counterparty_parameters: None,
6368 funding_outpoint: None,
6369 channel_type_features: channel_type.clone()
6371 funding_transaction: None,
6372 is_batch_funding: None,
6374 counterparty_cur_commitment_point: None,
6375 counterparty_prev_commitment_point: None,
6376 counterparty_node_id,
6378 counterparty_shutdown_scriptpubkey: None,
6380 commitment_secrets: CounterpartyCommitmentSecrets::new(),
6382 channel_update_status: ChannelUpdateStatus::Enabled,
6383 closing_signed_in_flight: false,
6385 announcement_sigs: None,
6387 #[cfg(any(test, fuzzing))]
6388 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
6389 #[cfg(any(test, fuzzing))]
6390 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
6392 workaround_lnd_bug_4006: None,
6393 sent_message_awaiting_response: None,
6395 latest_inbound_scid_alias: None,
6396 outbound_scid_alias,
6398 channel_pending_event_emitted: false,
6399 channel_ready_event_emitted: false,
6401 #[cfg(any(test, fuzzing))]
6402 historical_inbound_htlc_fulfills: HashSet::new(),
6407 blocked_monitor_updates: Vec::new(),
6409 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
6413 /// Only allowed after [`ChannelContext::channel_transaction_parameters`] is set.
6414 fn get_funding_created_msg<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
6415 let counterparty_keys = self.context.build_remote_transaction_keys();
6416 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
6417 let signature = match &self.context.holder_signer {
6418 // TODO (taproot|arik): move match into calling method for Taproot
6419 ChannelSignerType::Ecdsa(ecdsa) => {
6420 ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.context.secp_ctx)
6421 .map(|(sig, _)| sig).ok()?
6423 // TODO (taproot|arik)
6428 if self.context.signer_pending_funding {
6429 log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding");
6430 self.context.signer_pending_funding = false;
6433 Some(msgs::FundingCreated {
6434 temporary_channel_id: self.context.temporary_channel_id.unwrap(),
6435 funding_txid: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().txid,
6436 funding_output_index: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().index,
6439 partial_signature_with_nonce: None,
6441 next_local_nonce: None,
6445 /// Updates channel state with knowledge of the funding transaction's txid/index, and generates
6446 /// a funding_created message for the remote peer.
6447 /// Panics if called at some time other than immediately after initial handshake, if called twice,
6448 /// or if called on an inbound channel.
6449 /// Note that channel_id changes during this call!
6450 /// Do NOT broadcast the funding transaction until after a successful funding_signed call!
6451 /// If an Err is returned, it is a ChannelError::Close.
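///
/// Simplified call sketch (illustrative; normally driven by funding-transaction generation in
/// `ChannelManager`):
///
/// ```ignore
/// let funding_created_opt = chan
///     .get_funding_created(funding_tx, funding_outpoint, /* is_batch_funding */ false, &logger)
///     .map_err(|(_chan, err)| err)?;
/// // Do NOT broadcast `funding_tx` yet; wait for a successful `funding_signed`.
/// ```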
6452 pub fn get_funding_created<L: Deref>(&mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
6453 -> Result<Option<msgs::FundingCreated>, (Self, ChannelError)> where L::Target: Logger {
6454 if !self.context.is_outbound() {
6455 panic!("Tried to create outbound funding_created message on an inbound channel!");
6458 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
6459 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
6461 panic!("Tried to get a funding_created messsage at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
6463 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
6464 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
6465 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6466 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
6469 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
6470 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
6472 // Now that we're past error-generating stuff, update our local state:
6474 self.context.channel_state = ChannelState::FundingNegotiated;
6475 self.context.channel_id = funding_txo.to_channel_id();
6477 // If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
6478 // We can skip this if it is a zero-conf channel.
6479 if funding_transaction.is_coin_base() &&
6480 self.context.minimum_depth.unwrap_or(0) > 0 &&
6481 self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
6482 self.context.minimum_depth = Some(COINBASE_MATURITY);
6485 self.context.funding_transaction = Some(funding_transaction);
6486 self.context.is_batch_funding = Some(()).filter(|_| is_batch_funding);
6488 let funding_created = self.get_funding_created_msg(logger);
6489 if funding_created.is_none() {
6490 #[cfg(not(async_signing))] {
6491 panic!("Failed to get signature for new funding creation");
6493 #[cfg(async_signing)] {
6494 if !self.context.signer_pending_funding {
6495 log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
6496 self.context.signer_pending_funding = true;
6504 fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
6505 // The default channel type (i.e. the first one we try) depends on whether the channel is
6506 // public - if it is, we just go with `only_static_remotekey` as it's the only option
6507 // available. If it's private, we first try `scid_privacy` as it provides better privacy
6508 // with no other changes, and fall back to `only_static_remotekey`.
6509 let mut ret = ChannelTypeFeatures::only_static_remote_key();
6510 if !config.channel_handshake_config.announced_channel &&
6511 config.channel_handshake_config.negotiate_scid_privacy &&
6512 their_features.supports_scid_privacy() {
6513 ret.set_scid_privacy_required();
6516 // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
6517 // set it now. If they don't understand it, we'll fall back to our default of
6518 // `only_static_remotekey`.
6519 if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
6520 their_features.supports_anchors_zero_fee_htlc_tx() {
6521 ret.set_anchors_zero_fee_htlc_tx_required();
6527 /// If we receive an error message, it may only be a rejection of the channel type we tried,
6528 /// not of our ability to open any channel at all. Thus, on error, we should first call this
6529 /// and see if it produces a new `OpenChannel` message to retry with; otherwise the channel should be failed.
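///
/// Expected retry flow, roughly (caller-side names are hypothetical):
///
/// ```ignore
/// // After the peer rejects our `open_channel` with an `error` message:
/// match chan.maybe_handle_error_without_close(chain_hash, &fee_estimator) {
///     Ok(open_channel_msg) => send_to_peer(open_channel_msg), // retry with a downgraded channel type
///     Err(()) => fail_channel(),                               // no fallback type left
/// }
/// ```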
6530 pub(crate) fn maybe_handle_error_without_close<F: Deref>(
6531 &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F>
6532 ) -> Result<msgs::OpenChannel, ()>
6534 F::Target: FeeEstimator
6536 if !self.context.is_outbound() ||
6538 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
6539 if flags == NegotiatingFundingFlags::OUR_INIT_SENT
6544 if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() {
6545 // We've exhausted our options
6548 // We support opening a few different types of channels. Try removing our additional
6549 // features one by one until we've either arrived at our default or the counterparty has accepted one.
6552 // Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
6553 // counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
6554 // checks whether the counterparty supports every feature, this would only happen if the
6555 // counterparty is advertising the feature but rejecting channels that propose it, for whatever reason.
6557 if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
6558 self.context.channel_type.clear_anchors_zero_fee_htlc_tx();
6559 self.context.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
6560 assert!(!self.context.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
6561 } else if self.context.channel_type.supports_scid_privacy() {
6562 self.context.channel_type.clear_scid_privacy();
6564 self.context.channel_type = ChannelTypeFeatures::only_static_remote_key();
6566 self.context.channel_transaction_parameters.channel_type_features = self.context.channel_type.clone();
6567 Ok(self.get_open_channel(chain_hash))
6570 pub fn get_open_channel(&self, chain_hash: ChainHash) -> msgs::OpenChannel {
6571 if !self.context.is_outbound() {
6572 panic!("Tried to open a channel for an inbound channel?");
6574 if self.context.have_received_message() {
6575 panic!("Cannot generate an open_channel after we've moved forward");
6578 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6579 panic!("Tried to send an open_channel for a channel that has already advanced");
6582 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
6583 let keys = self.context.get_holder_pubkeys();
6587 temporary_channel_id: self.context.channel_id,
6588 funding_satoshis: self.context.channel_value_satoshis,
6589 push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat,
6590 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
6591 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
6592 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
6593 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
6594 feerate_per_kw: self.context.feerate_per_kw as u32,
6595 to_self_delay: self.context.get_holder_selected_contest_delay(),
6596 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
6597 funding_pubkey: keys.funding_pubkey,
6598 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
6599 payment_point: keys.payment_point,
6600 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
6601 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
6602 first_per_commitment_point,
6603 channel_flags: if self.context.config.announced_channel {1} else {0},
6604 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
6605 Some(script) => script.clone().into_inner(),
6606 None => Builder::new().into_script(),
6608 channel_type: Some(self.context.channel_type.clone()),
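/// Handles an `accept_channel` message from the counterparty on an outbound channel we
/// initiated, validating its parameters against protocol limits and our configured
/// `ChannelHandshakeLimits` before recording the counterparty's keys and constraints.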
6613 pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
6614 let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };
6616 // Check sanity of message fields:
6617 if !self.context.is_outbound() {
6618 return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
6620 if !matches!(self.context.channel_state, ChannelState::NegotiatingFunding(flags) if flags == NegotiatingFundingFlags::OUR_INIT_SENT) {
6621 return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
6623 if msg.dust_limit_satoshis > 21000000 * 100000000 {
6624 return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.dust_limit_satoshis)));
6626 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
6627 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
6629 if msg.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
6630 return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
6632 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
6633 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
6634 msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
6636 let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
6637 if msg.htlc_minimum_msat >= full_channel_value_msat {
6638 return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
6640 let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
6641 if msg.to_self_delay > max_delay_acceptable {
6642 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.to_self_delay)));
6644 if msg.max_accepted_htlcs < 1 {
6645 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
6647 if msg.max_accepted_htlcs > MAX_HTLCS {
6648 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
6651 // Now check against optional parameters as set by config...
6652 if msg.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
6653 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
6655 if msg.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
6656 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
6658 if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
6659 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
6661 if msg.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
6662 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
6664 if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6665 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6667 if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
6668 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
6670 if msg.minimum_depth > peer_limits.max_minimum_depth {
6671 return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.minimum_depth)));
6674 if let Some(ty) = &msg.channel_type {
6675 if *ty != self.context.channel_type {
6676 return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
6678 } else if their_features.supports_channel_type() {
6679 // Assume they've accepted the channel type as they said they understand it.
6681 let channel_type = ChannelTypeFeatures::from_init(&their_features);
6682 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
6683 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
6685 self.context.channel_type = channel_type.clone();
6686 self.context.channel_transaction_parameters.channel_type_features = channel_type;
6689 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
6690 match &msg.shutdown_scriptpubkey {
6691 &Some(ref script) => {
6692 // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
6693 if script.len() == 0 {
6696 if !script::is_bolt2_compliant(&script, their_features) {
6697 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
6699 Some(script.clone())
6702 // Peer is signaling upfront_shutdown but didn't opt out with the correct mechanism (i.e., a 0-length script). The peer looks buggy; we fail the channel
6704 return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
6709 self.context.counterparty_dust_limit_satoshis = msg.dust_limit_satoshis;
6710 self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
6711 self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
6712 self.context.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat;
6713 self.context.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs;
6715 if peer_limits.trust_own_funding_0conf {
6716 self.context.minimum_depth = Some(msg.minimum_depth);
6718 self.context.minimum_depth = Some(cmp::max(1, msg.minimum_depth));
6721 let counterparty_pubkeys = ChannelPublicKeys {
6722 funding_pubkey: msg.funding_pubkey,
6723 revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
6724 payment_point: msg.payment_point,
6725 delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
6726 htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
6729 self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
6730 selected_contest_delay: msg.to_self_delay,
6731 pubkeys: counterparty_pubkeys,
6734 self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point);
6735 self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
6737 self.context.channel_state = ChannelState::NegotiatingFunding(
6738 NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
6740 self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
6745 /// Handles a funding_signed message from the remote end.
6746 /// If this call is successful, broadcast the funding transaction (and not before!)
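///
/// Rough shape of a successful call (illustrative only):
///
/// ```ignore
/// let (funded_channel, channel_monitor) = pending_channel
///     .funding_signed(&funding_signed_msg, best_block, &signer_provider, &logger)
///     .map_err(|(_chan, err)| err)?;
/// // Persist `channel_monitor` first; only then is it safe to broadcast the funding transaction.
/// ```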
6747 pub fn funding_signed<L: Deref>(
6748 mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
6749 ) -> Result<(Channel<SP>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (OutboundV1Channel<SP>, ChannelError)>
6753 if !self.context.is_outbound() {
6754 return Err((self, ChannelError::Close("Received funding_signed for an inbound channel?".to_owned())));
6756 if !matches!(self.context.channel_state, ChannelState::FundingNegotiated) {
6757 return Err((self, ChannelError::Close("Received funding_signed in strange state!".to_owned())));
6759 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
6760 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
6761 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6762 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
6765 let funding_script = self.context.get_funding_redeemscript();
6767 let counterparty_keys = self.context.build_remote_transaction_keys();
6768 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
6769 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
6770 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
6772 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
6773 &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
6775 let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
6776 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
6778 let trusted_tx = initial_commitment_tx.trust();
6779 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
6780 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
6781 // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
6782 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
6783 return Err((self, ChannelError::Close("Invalid funding_signed signature from peer".to_owned())));
6787 let holder_commitment_tx = HolderCommitmentTransaction::new(
6788 initial_commitment_tx,
6791 &self.context.get_holder_pubkeys().funding_pubkey,
6792 self.context.counterparty_funding_pubkey()
6796 self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new());
6797 if validated.is_err() {
6798 return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
6801 let funding_redeemscript = self.context.get_funding_redeemscript();
6802 let funding_txo = self.context.get_funding_txo().unwrap();
6803 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
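// Per BOLT 3, commitment numbers are obscured (XORed with a factor derived from both sides'
// payment basepoints) when encoded into a commitment transaction's locktime and sequence;
// the monitor needs this factor to map broadcast commitment transactions back to their numbers.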
6804 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
6805 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
6806 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
6807 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
6808 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
6809 shutdown_script, self.context.get_holder_selected_contest_delay(),
6810 &self.context.destination_script, (funding_txo, funding_txo_script),
6811 &self.context.channel_transaction_parameters,
6812 funding_redeemscript.clone(), self.context.channel_value_satoshis,
6814 holder_commitment_tx, best_block, self.context.counterparty_node_id);
6815 channel_monitor.provide_initial_counterparty_commitment_tx(
6816 counterparty_initial_bitcoin_tx.txid, Vec::new(),
6817 self.context.cur_counterparty_commitment_transaction_number,
6818 self.context.counterparty_cur_commitment_point.unwrap(),
6819 counterparty_initial_commitment_tx.feerate_per_kw(),
6820 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
6821 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
6823 assert!(!self.context.channel_state.is_monitor_update_in_progress()); // We have not had any monitor(s) yet, so there are no updates to fail!
6824 if self.context.is_batch_funding() {
6825 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH);
6827 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
6829 self.context.cur_holder_commitment_transaction_number -= 1;
6830 self.context.cur_counterparty_commitment_transaction_number -= 1;
6832 log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
6834 let mut channel = Channel { context: self.context };
6836 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
6837 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
6838 Ok((channel, channel_monitor))
6841 /// Indicates that the signer may have some signatures for us, so we should retry if we're blocked.
6843 #[cfg(async_signing)]
6844 pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
6845 if self.context.signer_pending_funding && self.context.is_outbound() {
6846 log_trace!(logger, "Signer unblocked a funding_created");
6847 self.get_funding_created_msg(logger)
6852 /// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
6853 pub(super) struct InboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
6854 pub context: ChannelContext<SP>,
6855 pub unfunded_context: UnfundedChannelContext,
6858 impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
6859 /// Creates a new channel from a remote side's request for one.
6860 /// Assumes chain_hash has already been checked and corresponds with what we expect!
6861 pub fn new<ES: Deref, F: Deref, L: Deref>(
6862 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
6863 counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
6864 their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
6865 current_chain_height: u32, logger: &L, is_0conf: bool,
6866 ) -> Result<InboundV1Channel<SP>, ChannelError>
6867 where ES::Target: EntropySource,
6868 F::Target: FeeEstimator,
6871 let logger = WithContext::from(logger, Some(counterparty_node_id), Some(msg.temporary_channel_id));
6872 let announced_channel = (msg.channel_flags & 1) == 1;
6874 // First check the channel type is known, failing before we do anything else if we don't
6875 // support this channel type.
6876 let channel_type = if let Some(channel_type) = &msg.channel_type {
6877 if channel_type.supports_any_optional_bits() {
6878 return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
6881 // We only support the channel types defined by the `ChannelManager` in
6882 // `provided_channel_type_features`. The channel type must always support
6883 // `static_remote_key`.
6884 if !channel_type.requires_static_remote_key() {
6885 return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
6887 // Make sure we support all of the features behind the channel type.
6888 if !channel_type.is_subset(our_supported_features) {
6889 return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
6891 if channel_type.requires_scid_privacy() && announced_channel {
6892 return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
6894 channel_type.clone()
6896 let channel_type = ChannelTypeFeatures::from_init(&their_features);
6897 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
6898 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
6903 let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id);
6904 let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id);
6905 let pubkeys = holder_signer.pubkeys().clone();
6906 let counterparty_pubkeys = ChannelPublicKeys {
6907 funding_pubkey: msg.funding_pubkey,
6908 revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
6909 payment_point: msg.payment_point,
6910 delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
6911 htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
6914 if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
6915 return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
6918 // Check sanity of message fields:
6919 if msg.funding_satoshis > config.channel_handshake_limits.max_funding_satoshis {
6920 return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.channel_handshake_limits.max_funding_satoshis, msg.funding_satoshis)));
6922 if msg.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
6923 return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.funding_satoshis)));
6925 if msg.channel_reserve_satoshis > msg.funding_satoshis {
6926 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must be not greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.funding_satoshis)));
6928 let full_channel_value_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000;
6929 if msg.push_msat > full_channel_value_msat {
6930 return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg.push_msat, full_channel_value_msat)));
6932 if msg.dust_limit_satoshis > msg.funding_satoshis {
6933 return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than funding_satoshis {}. Peer never wants payout outputs?", msg.dust_limit_satoshis, msg.funding_satoshis)));
6935 if msg.htlc_minimum_msat >= full_channel_value_msat {
6936 return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
6938 Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, msg.feerate_per_kw, None, &&logger)?;
6940 let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
6941 if msg.to_self_delay > max_counterparty_selected_contest_delay {
6942 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, msg.to_self_delay)));
6944 if msg.max_accepted_htlcs < 1 {
6945 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
6947 if msg.max_accepted_htlcs > MAX_HTLCS {
6948 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
6951 // Now check against optional parameters as set by config...
6952 if msg.funding_satoshis < config.channel_handshake_limits.min_funding_satoshis {
6953 return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", msg.funding_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
6955 if msg.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
6956 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
6958 if msg.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
6959 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
6961 if msg.channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
6962 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
6964 if msg.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
6965 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
6967 if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6968 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6970 if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
6971 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
6974 // Convert things into internal flags and prep our state:
6976 if config.channel_handshake_limits.force_announced_channel_preference {
6977 if config.channel_handshake_config.announced_channel != announced_channel {
6978 return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
6982 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis, config);
6983 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6984 // Protocol-level safety check; this should never happen because of
6985 // `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
6986 return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6988 if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
6989 return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg.push_msat)));
6991 if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6992 log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
6993 msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
6995 if holder_selected_channel_reserve_satoshis < msg.dust_limit_satoshis {
6996 return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
6999 // Check that the funder's amount for the initial commitment tx is sufficient
7000 // for full fee payment plus a few HTLCs, to ensure the channel will be useful.
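// As a rough illustration (non-anchor weights, purely an example): at 2_500 sat/kW with a
// budget of 4 HTLCs, the commitment weight is about 724 + 4 * 172 = 1_412 WU, so the funder
// needs roughly 2_500 * 1_412 / 1_000 = 3_530 sats available for fees, on top of any anchor
// output value and the reserve checked below.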
7001 let anchor_outputs_value = if channel_type.supports_anchors_zero_fee_htlc_tx() {
7002 ANCHOR_OUTPUT_VALUE_SATOSHI * 2
7006 let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat;
7007 let commitment_tx_fee = commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000;
7008 if (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value) < commitment_tx_fee {
7009 return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee)));
7012 let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee - anchor_outputs_value;
7013 // While it's reasonable for us to not meet the channel reserve initially (if they don't
7014 // want to push much to us), our counterparty should always have more than our reserve.
7015 if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
7016 return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
7019 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
7020 match &msg.shutdown_scriptpubkey {
7021 &Some(ref script) => {
7022 // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
7023 if script.len() == 0 {
7026 if !script::is_bolt2_compliant(&script, their_features) {
7027 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
7029 Some(script.clone())
7032 // Peer is signaling upfront_shutdown but didn't opt out with the correct mechanism (i.e., a 0-length script). The peer looks buggy; we fail the channel
7034 return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
7039 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
7040 match signer_provider.get_shutdown_scriptpubkey() {
7041 Ok(scriptpubkey) => Some(scriptpubkey),
7042 Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
7046 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
7047 if !shutdown_scriptpubkey.is_compatible(&their_features) {
7048 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
7052 let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
7053 Ok(script) => script,
7054 Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
7057 let mut secp_ctx = Secp256k1::new();
7058 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
7060 let minimum_depth = if is_0conf {
7063 Some(cmp::max(config.channel_handshake_config.minimum_depth, 1))
7067 context: ChannelContext {
7070 config: LegacyChannelConfig {
7071 options: config.channel_config.clone(),
7073 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
7078 inbound_handshake_limits_override: None,
7080 temporary_channel_id: Some(msg.temporary_channel_id),
7081 channel_id: msg.temporary_channel_id,
7082 channel_state: ChannelState::NegotiatingFunding(
7083 NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
7085 announcement_sigs_state: AnnouncementSigsState::NotSent,
7088 latest_monitor_update_id: 0,
7090 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
7091 shutdown_scriptpubkey,
7094 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
7095 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
7096 value_to_self_msat: msg.push_msat,
7098 pending_inbound_htlcs: Vec::new(),
7099 pending_outbound_htlcs: Vec::new(),
7100 holding_cell_htlc_updates: Vec::new(),
7101 pending_update_fee: None,
7102 holding_cell_update_fee: None,
7103 next_holder_htlc_id: 0,
7104 next_counterparty_htlc_id: 0,
7105 update_time_counter: 1,
7107 resend_order: RAACommitmentOrder::CommitmentFirst,
7109 monitor_pending_channel_ready: false,
7110 monitor_pending_revoke_and_ack: false,
7111 monitor_pending_commitment_signed: false,
7112 monitor_pending_forwards: Vec::new(),
7113 monitor_pending_failures: Vec::new(),
7114 monitor_pending_finalized_fulfills: Vec::new(),
7116 signer_pending_commitment_update: false,
7117 signer_pending_funding: false,
7119 #[cfg(debug_assertions)]
7120 holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
7121 #[cfg(debug_assertions)]
7122 counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
7124 last_sent_closing_fee: None,
7125 pending_counterparty_closing_signed: None,
7126 expecting_peer_commitment_signed: false,
7127 closing_fee_limits: None,
7128 target_closing_feerate_sats_per_kw: None,
7130 funding_tx_confirmed_in: None,
7131 funding_tx_confirmation_height: 0,
7132 short_channel_id: None,
7133 channel_creation_height: current_chain_height,
7135 feerate_per_kw: msg.feerate_per_kw,
7136 channel_value_satoshis: msg.funding_satoshis,
7137 counterparty_dust_limit_satoshis: msg.dust_limit_satoshis,
7138 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
7139 counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000),
7140 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config),
7141 counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis),
7142 holder_selected_channel_reserve_satoshis,
7143 counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
7144 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
7145 counterparty_max_accepted_htlcs: msg.max_accepted_htlcs,
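// BOLT 2 caps the number of HTLCs each side may offer at 483 (`MAX_HTLCS`), so clamp the configured value.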
7146 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
7149 counterparty_forwarding_info: None,
7151 channel_transaction_parameters: ChannelTransactionParameters {
7152 holder_pubkeys: pubkeys,
7153 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
7154 is_outbound_from_holder: false,
7155 counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
7156 selected_contest_delay: msg.to_self_delay,
7157 pubkeys: counterparty_pubkeys,
7159 funding_outpoint: None,
7160 channel_type_features: channel_type.clone()
7162 funding_transaction: None,
7163 is_batch_funding: None,
7165 counterparty_cur_commitment_point: Some(msg.first_per_commitment_point),
7166 counterparty_prev_commitment_point: None,
7167 counterparty_node_id,
7169 counterparty_shutdown_scriptpubkey,
7171 commitment_secrets: CounterpartyCommitmentSecrets::new(),
7173 channel_update_status: ChannelUpdateStatus::Enabled,
7174 closing_signed_in_flight: false,
7176 announcement_sigs: None,
7178 #[cfg(any(test, fuzzing))]
7179 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
7180 #[cfg(any(test, fuzzing))]
7181 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
7183 workaround_lnd_bug_4006: None,
7184 sent_message_awaiting_response: None,
7186 latest_inbound_scid_alias: None,
7187 outbound_scid_alias: 0,
7189 channel_pending_event_emitted: false,
7190 channel_ready_event_emitted: false,
7192 #[cfg(any(test, fuzzing))]
7193 historical_inbound_htlc_fulfills: HashSet::new(),
7198 blocked_monitor_updates: Vec::new(),
7200 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
7206 /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
7207 /// should be sent back to the counterparty node.
7209 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7210 pub fn accept_inbound_channel(&mut self) -> msgs::AcceptChannel {
7211 if self.context.is_outbound() {
7212 panic!("Tried to send accept_channel for an outbound channel?");
7215 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
7216 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
7218 panic!("Tried to send accept_channel after channel had moved forward");
7220 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7221 panic!("Tried to send an accept_channel for a channel that has already advanced");
7224 self.generate_accept_channel_message()
7227 /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
7228 /// inbound channel. If the intention is to accept an inbound channel, use
7229 /// [`InboundV1Channel::accept_inbound_channel`] instead.
7231 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7232 fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
7233 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
7234 let keys = self.context.get_holder_pubkeys();
7236 msgs::AcceptChannel {
7237 temporary_channel_id: self.context.channel_id,
7238 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
7239 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
7240 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
7241 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
7242 minimum_depth: self.context.minimum_depth.unwrap(),
7243 to_self_delay: self.context.get_holder_selected_contest_delay(),
7244 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
7245 funding_pubkey: keys.funding_pubkey,
7246 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
7247 payment_point: keys.payment_point,
7248 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
7249 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
7250 first_per_commitment_point,
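// If we committed to an upfront shutdown script, include it here; otherwise send an empty script,
// which peers negotiating `option_upfront_shutdown_script` treat as "no upfront script committed".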
7251 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
7252 Some(script) => script.clone().into_inner(),
7253 None => Builder::new().into_script(),
7255 channel_type: Some(self.context.channel_type.clone()),
7257 next_local_nonce: None,
7261 /// Allows tests to extract a [`msgs::AcceptChannel`] message for an
7262 /// inbound channel without accepting it.
7264 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7266 pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel {
7267 self.generate_accept_channel_message()
7270 fn check_funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<CommitmentTransaction, ChannelError> where L::Target: Logger {
7271 let funding_script = self.context.get_funding_redeemscript();
7273 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
7274 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
7275 let trusted_tx = initial_commitment_tx.trust();
7276 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
7277 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
7278 // They sign the holder commitment transaction...
7279 log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
7280 log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
7281 encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
7282 encode::serialize_hex(&funding_script), &self.context.channel_id());
7283 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
7285 Ok(initial_commitment_tx)
7288 pub fn funding_created<L: Deref>(
7289 mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
7290 ) -> Result<(Channel<SP>, Option<msgs::FundingSigned>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (Self, ChannelError)>
7294 if self.context.is_outbound() {
7295 return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned())));
7298 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
7299 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
7301 // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
7302 // remember the channel, so it's safe to just send an error_message here and drop the channel.
7304 return Err((self, ChannelError::Close("Received funding_created after we got the channel!".to_owned())));
7306 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
7307 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
7308 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7309 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
7312 let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
7313 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
7314 // This is an externally observable change before we finish all our checks. In particular
7315 // check_funding_created_signature may fail.
7316 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
7318 let initial_commitment_tx = match self.check_funding_created_signature(&msg.signature, logger) {
7320 Err(ChannelError::Close(e)) => {
7321 self.context.channel_transaction_parameters.funding_outpoint = None;
7322 return Err((self, ChannelError::Close(e)));
7325 // The only error we know how to handle is ChannelError::Close, so we fall over here
7326 // to make sure we don't continue with an inconsistent state.
7327 panic!("unexpected error type from check_funding_created_signature {:?}", e);
7331 let holder_commitment_tx = HolderCommitmentTransaction::new(
7332 initial_commitment_tx,
7335 &self.context.get_holder_pubkeys().funding_pubkey,
7336 self.context.counterparty_funding_pubkey()
7339 if let Err(_) = self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new()) {
7340 return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
7343 // Now that we're past error-generating stuff, update our local state:
7345 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
7346 self.context.channel_id = funding_txo.to_channel_id();
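// Commitment transaction numbers count down from INITIAL_COMMITMENT_NUMBER, so decrementing both
// counters below records that the initial commitment transactions have now been exchanged.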
7347 self.context.cur_counterparty_commitment_transaction_number -= 1;
7348 self.context.cur_holder_commitment_transaction_number -= 1;
7350 let (counterparty_initial_commitment_tx, funding_signed) = self.context.get_funding_signed_msg(logger);
7352 let funding_redeemscript = self.context.get_funding_redeemscript();
7353 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
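// Per BOLT 3, the commitment number is obscured by XORing it with a factor derived from both
// parties' payment basepoints, ordered by which side opened the channel (hence `is_outbound()`).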
7354 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
7355 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
7356 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
7357 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
7358 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
7359 shutdown_script, self.context.get_holder_selected_contest_delay(),
7360 &self.context.destination_script, (funding_txo, funding_txo_script.clone()),
7361 &self.context.channel_transaction_parameters,
7362 funding_redeemscript.clone(), self.context.channel_value_satoshis,
7364 holder_commitment_tx, best_block, self.context.counterparty_node_id);
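// Seed the new monitor with the counterparty's initial commitment transaction so it can react if
// that commitment is broadcast before any further updates are applied.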
7365 channel_monitor.provide_initial_counterparty_commitment_tx(
7366 counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
7367 self.context.cur_counterparty_commitment_transaction_number + 1,
7368 self.context.counterparty_cur_commitment_point.unwrap(), self.context.feerate_per_kw,
7369 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
7370 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
7372 log_info!(logger, "{} funding_signed for peer for channel {}",
7373 if funding_signed.is_some() { "Generated" } else { "Waiting for signature on" }, &self.context.channel_id());
7375 // Promote the channel to a full-fledged one now that we have updated the state and have a
7376 // `ChannelMonitor`.
7377 let mut channel = Channel {
7378 context: self.context,
7380 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
7381 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
7383 Ok((channel, funding_signed, channel_monitor))
7387 const SERIALIZATION_VERSION: u8 = 3;
7388 const MIN_SERIALIZATION_VERSION: u8 = 3;
7390 impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
7396 impl Writeable for ChannelUpdateStatus {
7397 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7398 // We only care about writing out the current state as it was announced, ie only either
7399 // Enabled or Disabled. In the case of DisabledStaged, we most recently announced the
7400 // channel as enabled, so we write 0. For EnabledStaged, we similarly write a 1.
7402 ChannelUpdateStatus::Enabled => 0u8.write(writer)?,
7403 ChannelUpdateStatus::DisabledStaged(_) => 0u8.write(writer)?,
7404 ChannelUpdateStatus::EnabledStaged(_) => 1u8.write(writer)?,
7405 ChannelUpdateStatus::Disabled => 1u8.write(writer)?,
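// On read, only the collapsed Enabled (0) / Disabled (1) values written above can appear; the
// staged variants exist only at runtime.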
7411 impl Readable for ChannelUpdateStatus {
7412 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
7413 Ok(match <u8 as Readable>::read(reader)? {
7414 0 => ChannelUpdateStatus::Enabled,
7415 1 => ChannelUpdateStatus::Disabled,
7416 _ => return Err(DecodeError::InvalidValue),
7421 impl Writeable for AnnouncementSigsState {
7422 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7423 // We only care about writing out the current state as if we had just disconnected, at
7424 // which point we always set anything but PeerReceived to NotSent.
7426 AnnouncementSigsState::NotSent => 0u8.write(writer),
7427 AnnouncementSigsState::MessageSent => 0u8.write(writer),
7428 AnnouncementSigsState::Committed => 0u8.write(writer),
7429 AnnouncementSigsState::PeerReceived => 1u8.write(writer),
7434 impl Readable for AnnouncementSigsState {
7435 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
7436 Ok(match <u8 as Readable>::read(reader)? {
7437 0 => AnnouncementSigsState::NotSent,
7438 1 => AnnouncementSigsState::PeerReceived,
7439 _ => return Err(DecodeError::InvalidValue),
7444 impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
7445 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7446 // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been called.
7449 write_ver_prefix!(writer, MIN_SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
7451 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7452 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write
7453 // the low bytes now and the optional high bytes later.
7454 let user_id_low = self.context.user_id as u64;
7455 user_id_low.write(writer)?;
7457 // Version 1 deserializers expected to read parts of the config object here. Version 2
7458 // deserializers (0.0.99) now read config through TLVs, and as we now require them for
7459 // `minimum_depth` we simply write dummy values here.
7460 writer.write_all(&[0; 8])?;
7462 self.context.channel_id.write(writer)?;
7464 let mut channel_state = self.context.channel_state;
7465 if matches!(channel_state, ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)) {
7466 channel_state.set_peer_disconnected();
7468 channel_state.to_u32().write(writer)?;
7470 self.context.channel_value_satoshis.write(writer)?;
7472 self.context.latest_monitor_update_id.write(writer)?;
7474 // Write out the old serialization for shutdown_pubkey for backwards compatibility, if
7475 // deserialized from that format.
7476 match self.context.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) {
7477 Some(shutdown_pubkey) => shutdown_pubkey.write(writer)?,
7478 None => [0u8; PUBLIC_KEY_SIZE].write(writer)?,
7480 self.context.destination_script.write(writer)?;
7482 self.context.cur_holder_commitment_transaction_number.write(writer)?;
7483 self.context.cur_counterparty_commitment_transaction_number.write(writer)?;
7484 self.context.value_to_self_msat.write(writer)?;
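// Inbound HTLCs the counterparty announced but never committed to in a commitment_signed are not
// serialized; the counterparty will re-announce them on reconnect.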
7486 let mut dropped_inbound_htlcs = 0;
7487 for htlc in self.context.pending_inbound_htlcs.iter() {
7488 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
7489 dropped_inbound_htlcs += 1;
7492 (self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?;
7493 for htlc in self.context.pending_inbound_htlcs.iter() {
7494 if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state {
7497 htlc.htlc_id.write(writer)?;
7498 htlc.amount_msat.write(writer)?;
7499 htlc.cltv_expiry.write(writer)?;
7500 htlc.payment_hash.write(writer)?;
7502 &InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
7503 &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_state) => {
7505 htlc_state.write(writer)?;
7507 &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_state) => {
7509 htlc_state.write(writer)?;
7511 &InboundHTLCState::Committed => {
7514 &InboundHTLCState::LocalRemoved(ref removal_reason) => {
7516 removal_reason.write(writer)?;
7521 let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];
7522 let mut pending_outbound_skimmed_fees: Vec<Option<u64>> = Vec::new();
7523 let mut pending_outbound_blinding_points: Vec<Option<PublicKey>> = Vec::new();
7525 (self.context.pending_outbound_htlcs.len() as u64).write(writer)?;
7526 for htlc in self.context.pending_outbound_htlcs.iter() {
7527 htlc.htlc_id.write(writer)?;
7528 htlc.amount_msat.write(writer)?;
7529 htlc.cltv_expiry.write(writer)?;
7530 htlc.payment_hash.write(writer)?;
7531 htlc.source.write(writer)?;
7533 &OutboundHTLCState::LocalAnnounced(ref onion_packet) => {
7535 onion_packet.write(writer)?;
7537 &OutboundHTLCState::Committed => {
7540 &OutboundHTLCState::RemoteRemoved(_) => {
7541 // Treat this as a Committed because we haven't received the CS - they'll
7542 // resend the claim/fail on reconnect, as well as (hopefully) the missing CS.
7545 &OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref outcome) => {
7547 if let OutboundHTLCOutcome::Success(preimage) = outcome {
7548 preimages.push(preimage);
7550 let reason: Option<&HTLCFailReason> = outcome.into();
7551 reason.write(writer)?;
7553 &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) => {
7555 if let OutboundHTLCOutcome::Success(preimage) = outcome {
7556 preimages.push(preimage);
7558 let reason: Option<&HTLCFailReason> = outcome.into();
7559 reason.write(writer)?;
7562 pending_outbound_skimmed_fees.push(htlc.skimmed_fee_msat);
7563 pending_outbound_blinding_points.push(htlc.blinding_point);
7566 let mut holding_cell_skimmed_fees: Vec<Option<u64>> = Vec::new();
7567 let mut holding_cell_blinding_points: Vec<Option<PublicKey>> = Vec::new();
7568 // Vec of (htlc_id, failure_code, sha256_of_onion)
7569 let mut malformed_htlcs: Vec<(u64, u16, [u8; 32])> = Vec::new();
7570 (self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
7571 for update in self.context.holding_cell_htlc_updates.iter() {
7573 &HTLCUpdateAwaitingACK::AddHTLC {
7574 ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
7575 blinding_point, skimmed_fee_msat,
7578 amount_msat.write(writer)?;
7579 cltv_expiry.write(writer)?;
7580 payment_hash.write(writer)?;
7581 source.write(writer)?;
7582 onion_routing_packet.write(writer)?;
7584 holding_cell_skimmed_fees.push(skimmed_fee_msat);
7585 holding_cell_blinding_points.push(blinding_point);
7587 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
7589 payment_preimage.write(writer)?;
7590 htlc_id.write(writer)?;
7592 &HTLCUpdateAwaitingACK::FailHTLC { ref htlc_id, ref err_packet } => {
7594 htlc_id.write(writer)?;
7595 err_packet.write(writer)?;
7597 &HTLCUpdateAwaitingACK::FailMalformedHTLC {
7598 htlc_id, failure_code, sha256_of_onion
7600 // We don't want to break downgrading by adding a new variant, so write a dummy
7601 // `::FailHTLC` variant and write the real malformed error as an optional TLV.
7602 malformed_htlcs.push((htlc_id, failure_code, sha256_of_onion));
7604 let dummy_err_packet = msgs::OnionErrorPacket { data: Vec::new() };
7606 htlc_id.write(writer)?;
7607 dummy_err_packet.write(writer)?;
7612 match self.context.resend_order {
7613 RAACommitmentOrder::CommitmentFirst => 0u8.write(writer)?,
7614 RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?,
7617 self.context.monitor_pending_channel_ready.write(writer)?;
7618 self.context.monitor_pending_revoke_and_ack.write(writer)?;
7619 self.context.monitor_pending_commitment_signed.write(writer)?;
7621 (self.context.monitor_pending_forwards.len() as u64).write(writer)?;
7622 for &(ref pending_forward, ref htlc_id) in self.context.monitor_pending_forwards.iter() {
7623 pending_forward.write(writer)?;
7624 htlc_id.write(writer)?;
7627 (self.context.monitor_pending_failures.len() as u64).write(writer)?;
7628 for &(ref htlc_source, ref payment_hash, ref fail_reason) in self.context.monitor_pending_failures.iter() {
7629 htlc_source.write(writer)?;
7630 payment_hash.write(writer)?;
7631 fail_reason.write(writer)?;
7634 if self.context.is_outbound() {
7635 self.context.pending_update_fee.map(|(a, _)| a).write(writer)?;
7636 } else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.context.pending_update_fee {
7637 Some(feerate).write(writer)?;
7639 // As for inbound HTLCs, if the update was only announced and never committed in a
7640 // commitment_signed, drop it.
7641 None::<u32>.write(writer)?;
7643 self.context.holding_cell_update_fee.write(writer)?;
7645 self.context.next_holder_htlc_id.write(writer)?;
7646 (self.context.next_counterparty_htlc_id - dropped_inbound_htlcs).write(writer)?;
7647 self.context.update_time_counter.write(writer)?;
7648 self.context.feerate_per_kw.write(writer)?;
7650 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7651 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7652 // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7653 // consider the stale state on reload.
7656 self.context.funding_tx_confirmed_in.write(writer)?;
7657 self.context.funding_tx_confirmation_height.write(writer)?;
7658 self.context.short_channel_id.write(writer)?;
7660 self.context.counterparty_dust_limit_satoshis.write(writer)?;
7661 self.context.holder_dust_limit_satoshis.write(writer)?;
7662 self.context.counterparty_max_htlc_value_in_flight_msat.write(writer)?;
7664 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7665 self.context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0).write(writer)?;
7667 self.context.counterparty_htlc_minimum_msat.write(writer)?;
7668 self.context.holder_htlc_minimum_msat.write(writer)?;
7669 self.context.counterparty_max_accepted_htlcs.write(writer)?;
7671 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7672 self.context.minimum_depth.unwrap_or(0).write(writer)?;
7674 match &self.context.counterparty_forwarding_info {
7677 info.fee_base_msat.write(writer)?;
7678 info.fee_proportional_millionths.write(writer)?;
7679 info.cltv_expiry_delta.write(writer)?;
7681 None => 0u8.write(writer)?
7684 self.context.channel_transaction_parameters.write(writer)?;
7685 self.context.funding_transaction.write(writer)?;
7687 self.context.counterparty_cur_commitment_point.write(writer)?;
7688 self.context.counterparty_prev_commitment_point.write(writer)?;
7689 self.context.counterparty_node_id.write(writer)?;
7691 self.context.counterparty_shutdown_scriptpubkey.write(writer)?;
7693 self.context.commitment_secrets.write(writer)?;
7695 self.context.channel_update_status.write(writer)?;
7697 #[cfg(any(test, fuzzing))]
7698 (self.context.historical_inbound_htlc_fulfills.len() as u64).write(writer)?;
7699 #[cfg(any(test, fuzzing))]
7700 for htlc in self.context.historical_inbound_htlc_fulfills.iter() {
7701 htlc.write(writer)?;
7704 // If the channel type is something other than only-static-remote-key, then we need to have
7705 // older clients fail to deserialize this channel at all. If the type is
7706 // only-static-remote-key, we simply consider it "default" and don't write the channel type out.
7708 let chan_type = if self.context.channel_type != ChannelTypeFeatures::only_static_remote_key() {
7709 Some(&self.context.channel_type) } else { None };
7711 // The same logic applies for `holder_selected_channel_reserve_satoshis` values other than
7712 // the default, and when `holder_max_htlc_value_in_flight_msat` is configured to be set to
7713 // a different percentage of the channel value than 10%, which older versions of LDK used
7714 // to set it to before the percentage was made configurable.
7715 let serialized_holder_selected_reserve =
7716 if self.context.holder_selected_channel_reserve_satoshis != get_legacy_default_holder_selected_channel_reserve_satoshis(self.context.channel_value_satoshis)
7717 { Some(self.context.holder_selected_channel_reserve_satoshis) } else { None };
7719 let mut old_max_in_flight_percent_config = UserConfig::default().channel_handshake_config;
7720 old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = MAX_IN_FLIGHT_PERCENT_LEGACY;
7721 let serialized_holder_htlc_max_in_flight =
7722 if self.context.holder_max_htlc_value_in_flight_msat != get_holder_max_htlc_value_in_flight_msat(self.context.channel_value_satoshis, &old_max_in_flight_percent_config)
7723 { Some(self.context.holder_max_htlc_value_in_flight_msat) } else { None };
7725 let channel_pending_event_emitted = Some(self.context.channel_pending_event_emitted);
7726 let channel_ready_event_emitted = Some(self.context.channel_ready_event_emitted);
7728 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7729 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. Therefore,
7730 // we write the high bytes as an option here.
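// e.g. a user_id of (3u128 << 64) | 7 was written above with low = 7u64, and here high = 3u64;
// readers rebuild it as ((high as u128) << 64) | (low as u128).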
7731 let user_id_high_opt = Some((self.context.user_id >> 64) as u64);
7733 let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.context.holder_max_accepted_htlcs) };
7735 write_tlv_fields!(writer, {
7736 (0, self.context.announcement_sigs, option),
7737 // minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
7738 // default value instead of being Option<>al. Thus, to maintain compatibility we write
7739 // them twice, once with their original default values above, and once as an option
7740 // here. On the read side, old versions will simply ignore the odd-type entries here,
7741 // and new versions map the default values to None and allow the TLV entries here to override them.
7743 (1, self.context.minimum_depth, option),
7744 (2, chan_type, option),
7745 (3, self.context.counterparty_selected_channel_reserve_satoshis, option),
7746 (4, serialized_holder_selected_reserve, option),
7747 (5, self.context.config, required),
7748 (6, serialized_holder_htlc_max_in_flight, option),
7749 (7, self.context.shutdown_scriptpubkey, option),
7750 (8, self.context.blocked_monitor_updates, optional_vec),
7751 (9, self.context.target_closing_feerate_sats_per_kw, option),
7752 (11, self.context.monitor_pending_finalized_fulfills, required_vec),
7753 (13, self.context.channel_creation_height, required),
7754 (15, preimages, required_vec),
7755 (17, self.context.announcement_sigs_state, required),
7756 (19, self.context.latest_inbound_scid_alias, option),
7757 (21, self.context.outbound_scid_alias, required),
7758 (23, channel_ready_event_emitted, option),
7759 (25, user_id_high_opt, option),
7760 (27, self.context.channel_keys_id, required),
7761 (28, holder_max_accepted_htlcs, option),
7762 (29, self.context.temporary_channel_id, option),
7763 (31, channel_pending_event_emitted, option),
7764 (35, pending_outbound_skimmed_fees, optional_vec),
7765 (37, holding_cell_skimmed_fees, optional_vec),
7766 (38, self.context.is_batch_funding, option),
7767 (39, pending_outbound_blinding_points, optional_vec),
7768 (41, holding_cell_blinding_points, optional_vec),
7769 (43, malformed_htlcs, optional_vec), // Added in 0.0.119
7776 const MAX_ALLOC_SIZE: usize = 64*1024;
7777 impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel<SP>
7779 ES::Target: EntropySource,
7780 SP::Target: SignerProvider
7782 fn read<R : io::Read>(reader: &mut R, args: (&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)) -> Result<Self, DecodeError> {
7783 let (entropy_source, signer_provider, serialized_height, our_supported_features) = args;
7784 let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
7786 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7787 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We read
7788 // the low bytes now and the high bytes later.
7789 let user_id_low: u64 = Readable::read(reader)?;
7791 let mut config = Some(LegacyChannelConfig::default());
7793 // Read the old serialization of the ChannelConfig from version 0.0.98.
7794 config.as_mut().unwrap().options.forwarding_fee_proportional_millionths = Readable::read(reader)?;
7795 config.as_mut().unwrap().options.cltv_expiry_delta = Readable::read(reader)?;
7796 config.as_mut().unwrap().announced_channel = Readable::read(reader)?;
7797 config.as_mut().unwrap().commit_upfront_shutdown_pubkey = Readable::read(reader)?;
7799 // Read the 8 bytes of backwards-compatibility ChannelConfig data.
7800 let mut _val: u64 = Readable::read(reader)?;
7803 let channel_id = Readable::read(reader)?;
7804 let channel_state = ChannelState::from_u32(Readable::read(reader)?).map_err(|_| DecodeError::InvalidValue)?;
7805 let channel_value_satoshis = Readable::read(reader)?;
7807 let latest_monitor_update_id = Readable::read(reader)?;
7809 let mut keys_data = None;
7811 // Read the serialized signer bytes. We'll choose to deserialize them or not based on whether
7812 // the `channel_keys_id` TLV is present below.
7813 let keys_len: u32 = Readable::read(reader)?;
7814 keys_data = Some(Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE)));
7815 while keys_data.as_ref().unwrap().len() != keys_len as usize {
7816 // Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
7817 let mut data = [0; 1024];
7818 let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.as_ref().unwrap().len())];
7819 reader.read_exact(read_slice)?;
7820 keys_data.as_mut().unwrap().extend_from_slice(read_slice);
7824 // Read the old serialization for shutdown_pubkey, preferring the TLV field later if set.
7825 let mut shutdown_scriptpubkey = match <PublicKey as Readable>::read(reader) {
7826 Ok(pubkey) => Some(ShutdownScript::new_p2wpkh_from_pubkey(pubkey)),
7829 let destination_script = Readable::read(reader)?;
7831 let cur_holder_commitment_transaction_number = Readable::read(reader)?;
7832 let cur_counterparty_commitment_transaction_number = Readable::read(reader)?;
7833 let value_to_self_msat = Readable::read(reader)?;
7835 let pending_inbound_htlc_count: u64 = Readable::read(reader)?;
7837 let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7838 for _ in 0..pending_inbound_htlc_count {
7839 pending_inbound_htlcs.push(InboundHTLCOutput {
7840 htlc_id: Readable::read(reader)?,
7841 amount_msat: Readable::read(reader)?,
7842 cltv_expiry: Readable::read(reader)?,
7843 payment_hash: Readable::read(reader)?,
7844 state: match <u8 as Readable>::read(reader)? {
7845 1 => InboundHTLCState::AwaitingRemoteRevokeToAnnounce(Readable::read(reader)?),
7846 2 => InboundHTLCState::AwaitingAnnouncedRemoteRevoke(Readable::read(reader)?),
7847 3 => InboundHTLCState::Committed,
7848 4 => InboundHTLCState::LocalRemoved(Readable::read(reader)?),
7849 _ => return Err(DecodeError::InvalidValue),
7854 let pending_outbound_htlc_count: u64 = Readable::read(reader)?;
7855 let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7856 for _ in 0..pending_outbound_htlc_count {
7857 pending_outbound_htlcs.push(OutboundHTLCOutput {
7858 htlc_id: Readable::read(reader)?,
7859 amount_msat: Readable::read(reader)?,
7860 cltv_expiry: Readable::read(reader)?,
7861 payment_hash: Readable::read(reader)?,
7862 source: Readable::read(reader)?,
7863 state: match <u8 as Readable>::read(reader)? {
7864 0 => OutboundHTLCState::LocalAnnounced(Box::new(Readable::read(reader)?)),
7865 1 => OutboundHTLCState::Committed,
7867 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7868 OutboundHTLCState::RemoteRemoved(option.into())
7871 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7872 OutboundHTLCState::AwaitingRemoteRevokeToRemove(option.into())
7875 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7876 OutboundHTLCState::AwaitingRemovedRemoteRevoke(option.into())
7878 _ => return Err(DecodeError::InvalidValue),
7880 skimmed_fee_msat: None,
7881 blinding_point: None,
7885 let holding_cell_htlc_update_count: u64 = Readable::read(reader)?;
7886 let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, DEFAULT_MAX_HTLCS as usize*2));
7887 for _ in 0..holding_cell_htlc_update_count {
7888 holding_cell_htlc_updates.push(match <u8 as Readable>::read(reader)? {
7889 0 => HTLCUpdateAwaitingACK::AddHTLC {
7890 amount_msat: Readable::read(reader)?,
7891 cltv_expiry: Readable::read(reader)?,
7892 payment_hash: Readable::read(reader)?,
7893 source: Readable::read(reader)?,
7894 onion_routing_packet: Readable::read(reader)?,
7895 skimmed_fee_msat: None,
7896 blinding_point: None,
7898 1 => HTLCUpdateAwaitingACK::ClaimHTLC {
7899 payment_preimage: Readable::read(reader)?,
7900 htlc_id: Readable::read(reader)?,
7902 2 => HTLCUpdateAwaitingACK::FailHTLC {
7903 htlc_id: Readable::read(reader)?,
7904 err_packet: Readable::read(reader)?,
7906 _ => return Err(DecodeError::InvalidValue),
7910 let resend_order = match <u8 as Readable>::read(reader)? {
7911 0 => RAACommitmentOrder::CommitmentFirst,
7912 1 => RAACommitmentOrder::RevokeAndACKFirst,
7913 _ => return Err(DecodeError::InvalidValue),
7916 let monitor_pending_channel_ready = Readable::read(reader)?;
7917 let monitor_pending_revoke_and_ack = Readable::read(reader)?;
7918 let monitor_pending_commitment_signed = Readable::read(reader)?;
7920 let monitor_pending_forwards_count: u64 = Readable::read(reader)?;
7921 let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, DEFAULT_MAX_HTLCS as usize));
7922 for _ in 0..monitor_pending_forwards_count {
7923 monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?));
7926 let monitor_pending_failures_count: u64 = Readable::read(reader)?;
7927 let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, DEFAULT_MAX_HTLCS as usize));
7928 for _ in 0..monitor_pending_failures_count {
7929 monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?));
7932 let pending_update_fee_value: Option<u32> = Readable::read(reader)?;
7934 let holding_cell_update_fee = Readable::read(reader)?;
7936 let next_holder_htlc_id = Readable::read(reader)?;
7937 let next_counterparty_htlc_id = Readable::read(reader)?;
7938 let update_time_counter = Readable::read(reader)?;
7939 let feerate_per_kw = Readable::read(reader)?;
7941 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7942 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7943 // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7944 // consider the stale state on reload.
7945 match <u8 as Readable>::read(reader)? {
7948 let _: u32 = Readable::read(reader)?;
7949 let _: u64 = Readable::read(reader)?;
7950 let _: Signature = Readable::read(reader)?;
7952 _ => return Err(DecodeError::InvalidValue),
7955 let funding_tx_confirmed_in = Readable::read(reader)?;
7956 let funding_tx_confirmation_height = Readable::read(reader)?;
7957 let short_channel_id = Readable::read(reader)?;
7959 let counterparty_dust_limit_satoshis = Readable::read(reader)?;
7960 let holder_dust_limit_satoshis = Readable::read(reader)?;
7961 let counterparty_max_htlc_value_in_flight_msat = Readable::read(reader)?;
7962 let mut counterparty_selected_channel_reserve_satoshis = None;
7964 // Read the old serialization from version 0.0.98.
7965 counterparty_selected_channel_reserve_satoshis = Some(Readable::read(reader)?);
7967 // Read the 8 bytes of backwards-compatibility data.
7968 let _dummy: u64 = Readable::read(reader)?;
7970 let counterparty_htlc_minimum_msat = Readable::read(reader)?;
7971 let holder_htlc_minimum_msat = Readable::read(reader)?;
7972 let counterparty_max_accepted_htlcs = Readable::read(reader)?;
7974 let mut minimum_depth = None;
7976 // Read the old serialization from version 0.0.98.
7977 minimum_depth = Some(Readable::read(reader)?);
7979 // Read the 4 bytes of backwards-compatibility data.
7980 let _dummy: u32 = Readable::read(reader)?;
7983 let counterparty_forwarding_info = match <u8 as Readable>::read(reader)? {
7985 1 => Some(CounterpartyForwardingInfo {
7986 fee_base_msat: Readable::read(reader)?,
7987 fee_proportional_millionths: Readable::read(reader)?,
7988 cltv_expiry_delta: Readable::read(reader)?,
7990 _ => return Err(DecodeError::InvalidValue),
7993 let mut channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
7994 let funding_transaction: Option<Transaction> = Readable::read(reader)?;
7996 let counterparty_cur_commitment_point = Readable::read(reader)?;
7998 let counterparty_prev_commitment_point = Readable::read(reader)?;
7999 let counterparty_node_id = Readable::read(reader)?;
8001 let counterparty_shutdown_scriptpubkey = Readable::read(reader)?;
8002 let commitment_secrets = Readable::read(reader)?;
8004 let channel_update_status = Readable::read(reader)?;
8006 #[cfg(any(test, fuzzing))]
8007 let mut historical_inbound_htlc_fulfills = HashSet::new();
8008 #[cfg(any(test, fuzzing))]
8010 let htlc_fulfills_len: u64 = Readable::read(reader)?;
8011 for _ in 0..htlc_fulfills_len {
8012 assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?));
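// Only the channel funder sends update_fee; on the write side a non-funder's pending fee is only
// serialized while in `AwaitingRemoteRevokeToAnnounce`, so that state is restored here.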
8016 let pending_update_fee = if let Some(feerate) = pending_update_fee_value {
8017 Some((feerate, if channel_parameters.is_outbound_from_holder {
8018 FeeUpdateState::Outbound
8020 FeeUpdateState::AwaitingRemoteRevokeToAnnounce
8026 let mut announcement_sigs = None;
8027 let mut target_closing_feerate_sats_per_kw = None;
8028 let mut monitor_pending_finalized_fulfills = Some(Vec::new());
8029 let mut holder_selected_channel_reserve_satoshis = Some(get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis));
8030 let mut holder_max_htlc_value_in_flight_msat = Some(get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().channel_handshake_config));
8031 // Prior to supporting channel type negotiation, all of our channels were static_remotekey
8032 // only, so we default to that if none was written.
8033 let mut channel_type = Some(ChannelTypeFeatures::only_static_remote_key());
8034 let mut channel_creation_height = Some(serialized_height);
8035 let mut preimages_opt: Option<Vec<Option<PaymentPreimage>>> = None;
8037 // If we read an old Channel, for simplicity we just treat it as "we never sent an
8038 // AnnouncementSignatures" which implies we'll re-send it on reconnect, but that's fine.
8039 let mut announcement_sigs_state = Some(AnnouncementSigsState::NotSent);
8040 let mut latest_inbound_scid_alias = None;
8041 let mut outbound_scid_alias = None;
8042 let mut channel_pending_event_emitted = None;
8043 let mut channel_ready_event_emitted = None;
8045 let mut user_id_high_opt: Option<u64> = None;
8046 let mut channel_keys_id: Option<[u8; 32]> = None;
8047 let mut temporary_channel_id: Option<ChannelId> = None;
8048 let mut holder_max_accepted_htlcs: Option<u16> = None;
8050 let mut blocked_monitor_updates = Some(Vec::new());
8052 let mut pending_outbound_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
8053 let mut holding_cell_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
8055 let mut is_batch_funding: Option<()> = None;
8057 let mut pending_outbound_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
8058 let mut holding_cell_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
8060 let mut malformed_htlcs: Option<Vec<(u64, u16, [u8; 32])>> = None;
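// Odd-numbered TLV types below are optional and silently skipped by older readers, while
// even-numbered types (e.g. the channel type) deliberately make versions that don't understand
// them fail to deserialize the channel.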
8062 read_tlv_fields!(reader, {
8063 (0, announcement_sigs, option),
8064 (1, minimum_depth, option),
8065 (2, channel_type, option),
8066 (3, counterparty_selected_channel_reserve_satoshis, option),
8067 (4, holder_selected_channel_reserve_satoshis, option),
8068 (5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
8069 (6, holder_max_htlc_value_in_flight_msat, option),
8070 (7, shutdown_scriptpubkey, option),
8071 (8, blocked_monitor_updates, optional_vec),
8072 (9, target_closing_feerate_sats_per_kw, option),
8073 (11, monitor_pending_finalized_fulfills, optional_vec),
8074 (13, channel_creation_height, option),
8075 (15, preimages_opt, optional_vec),
8076 (17, announcement_sigs_state, option),
8077 (19, latest_inbound_scid_alias, option),
8078 (21, outbound_scid_alias, option),
8079 (23, channel_ready_event_emitted, option),
8080 (25, user_id_high_opt, option),
8081 (27, channel_keys_id, option),
8082 (28, holder_max_accepted_htlcs, option),
8083 (29, temporary_channel_id, option),
8084 (31, channel_pending_event_emitted, option),
8085 (35, pending_outbound_skimmed_fees_opt, optional_vec),
8086 (37, holding_cell_skimmed_fees_opt, optional_vec),
8087 (38, is_batch_funding, option),
8088 (39, pending_outbound_blinding_points_opt, optional_vec),
8089 (41, holding_cell_blinding_points_opt, optional_vec),
8090 (43, malformed_htlcs, optional_vec), // Added in 0.0.119
8093 let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
8094 let mut holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
8095 // If we've gotten to the funding stage of the channel, populate the signer with its
8096 // required channel parameters.
8097 if channel_state >= ChannelState::FundingNegotiated {
8098 holder_signer.provide_channel_parameters(&channel_parameters);
8100 (channel_keys_id, holder_signer)
8102 // `keys_data` can be `None` if we had corrupted data.
8103 let keys_data = keys_data.ok_or(DecodeError::InvalidValue)?;
8104 let holder_signer = signer_provider.read_chan_signer(&keys_data)?;
8105 (holder_signer.channel_keys_id(), holder_signer)
8108 if let Some(preimages) = preimages_opt {
8109 let mut iter = preimages.into_iter();
8110 for htlc in pending_outbound_htlcs.iter_mut() {
8112 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(None)) => {
8113 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
8115 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(None)) => {
8116 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
8121 // We expect all preimages to be consumed above
8122 if iter.next().is_some() {
8123 return Err(DecodeError::InvalidValue);
8127 let chan_features = channel_type.as_ref().unwrap();
8128 if !chan_features.is_subset(our_supported_features) {
8129 // If the channel was written by a new version and negotiated with features we don't
8130 // understand yet, refuse to read it.
8131 return Err(DecodeError::UnknownRequiredFeature);
8134 // ChannelTransactionParameters may have had an empty features set upon deserialization.
8135 // To account for that, we're proactively setting/overriding the field here.
8136 channel_parameters.channel_type_features = chan_features.clone();
8138 let mut secp_ctx = Secp256k1::new();
8139 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
8141 // `user_id` used to be a single u64 value. In order to remain backwards
8142 // compatible with versions prior to 0.0.113, the u128 is serialized as two
8143 // separate u64 values.
8144 let user_id = user_id_low as u128 + ((user_id_high_opt.unwrap_or(0) as u128) << 64);
8146 let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);
8148 if let Some(skimmed_fees) = pending_outbound_skimmed_fees_opt {
8149 let mut iter = skimmed_fees.into_iter();
8150 for htlc in pending_outbound_htlcs.iter_mut() {
8151 htlc.skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
8153 // We expect all skimmed fees to be consumed above
8154 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8156 if let Some(skimmed_fees) = holding_cell_skimmed_fees_opt {
8157 let mut iter = skimmed_fees.into_iter();
8158 for htlc in holding_cell_htlc_updates.iter_mut() {
8159 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut skimmed_fee_msat, .. } = htlc {
8160 *skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
8163 // We expect all skimmed fees to be consumed above
8164 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8166 if let Some(blinding_pts) = pending_outbound_blinding_points_opt {
8167 let mut iter = blinding_pts.into_iter();
8168 for htlc in pending_outbound_htlcs.iter_mut() {
8169 htlc.blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
8171 // We expect all blinding points to be consumed above
8172 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8174 if let Some(blinding_pts) = holding_cell_blinding_points_opt {
8175 let mut iter = blinding_pts.into_iter();
8176 for htlc in holding_cell_htlc_updates.iter_mut() {
8177 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut blinding_point, .. } = htlc {
8178 *blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
8181 // We expect all blinding points to be consumed above
8182 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
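// Rebuild any `FailMalformedHTLC` holding-cell entries: the dummy `FailHTLC` written for downgrade
// compatibility is replaced with the real malformed failure carried in the optional TLV (type 43).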
8185 if let Some(malformed_htlcs) = malformed_htlcs {
8186 for (malformed_htlc_id, failure_code, sha256_of_onion) in malformed_htlcs {
8187 let htlc_idx = holding_cell_htlc_updates.iter().position(|htlc| {
8188 if let HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet } = htlc {
8189 let matches = *htlc_id == malformed_htlc_id;
8190 if matches { debug_assert!(err_packet.data.is_empty()) }
8193 }).ok_or(DecodeError::InvalidValue)?;
8194 let malformed_htlc = HTLCUpdateAwaitingACK::FailMalformedHTLC {
8195 htlc_id: malformed_htlc_id, failure_code, sha256_of_onion
8197 let _ = core::mem::replace(&mut holding_cell_htlc_updates[htlc_idx], malformed_htlc);
8202 context: ChannelContext {
8205 config: config.unwrap(),
8209 // Note that we don't care about serializing handshake limits as we only ever serialize
8210 // channel data after the handshake has completed.
8211 inbound_handshake_limits_override: None,
8214 temporary_channel_id,
8216 announcement_sigs_state: announcement_sigs_state.unwrap(),
8218 channel_value_satoshis,
8220 latest_monitor_update_id,
8222 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
8223 shutdown_scriptpubkey,
8226 cur_holder_commitment_transaction_number,
8227 cur_counterparty_commitment_transaction_number,
8230 holder_max_accepted_htlcs,
8231 pending_inbound_htlcs,
8232 pending_outbound_htlcs,
8233 holding_cell_htlc_updates,
8237 monitor_pending_channel_ready,
8238 monitor_pending_revoke_and_ack,
8239 monitor_pending_commitment_signed,
8240 monitor_pending_forwards,
8241 monitor_pending_failures,
8242 monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
8244 signer_pending_commitment_update: false,
8245 signer_pending_funding: false,
8248 holding_cell_update_fee,
8249 next_holder_htlc_id,
8250 next_counterparty_htlc_id,
8251 update_time_counter,
8254 #[cfg(debug_assertions)]
8255 holder_max_commitment_tx_output: Mutex::new((0, 0)),
8256 #[cfg(debug_assertions)]
8257 counterparty_max_commitment_tx_output: Mutex::new((0, 0)),
8259 last_sent_closing_fee: None,
8260 pending_counterparty_closing_signed: None,
8261 expecting_peer_commitment_signed: false,
8262 closing_fee_limits: None,
8263 target_closing_feerate_sats_per_kw,
8265 funding_tx_confirmed_in,
8266 funding_tx_confirmation_height,
8268 channel_creation_height: channel_creation_height.unwrap(),
8270 counterparty_dust_limit_satoshis,
8271 holder_dust_limit_satoshis,
8272 counterparty_max_htlc_value_in_flight_msat,
8273 holder_max_htlc_value_in_flight_msat: holder_max_htlc_value_in_flight_msat.unwrap(),
8274 counterparty_selected_channel_reserve_satoshis,
8275 holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis.unwrap(),
8276 counterparty_htlc_minimum_msat,
8277 holder_htlc_minimum_msat,
8278 counterparty_max_accepted_htlcs,
8281 counterparty_forwarding_info,
8283 channel_transaction_parameters: channel_parameters,
8284 funding_transaction,
8287 counterparty_cur_commitment_point,
8288 counterparty_prev_commitment_point,
8289 counterparty_node_id,
8291 counterparty_shutdown_scriptpubkey,
8295 channel_update_status,
8296 closing_signed_in_flight: false,
8300 #[cfg(any(test, fuzzing))]
8301 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
8302 #[cfg(any(test, fuzzing))]
8303 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
8305 workaround_lnd_bug_4006: None,
8306 sent_message_awaiting_response: None,
8308 latest_inbound_scid_alias,
8309 // Later in the ChannelManager deserialization phase we scan for channels and assign scid aliases if it's missing
8310 outbound_scid_alias: outbound_scid_alias.unwrap_or(0),
8312 channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true),
8313 channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true),
8315 #[cfg(any(test, fuzzing))]
8316 historical_inbound_htlc_fulfills,
8318 channel_type: channel_type.unwrap(),
8321 blocked_monitor_updates: blocked_monitor_updates.unwrap(),
8330 use bitcoin::blockdata::constants::ChainHash;
8331 use bitcoin::blockdata::script::{ScriptBuf, Builder};
8332 use bitcoin::blockdata::transaction::{Transaction, TxOut};
8333 use bitcoin::blockdata::opcodes;
8334 use bitcoin::network::constants::Network;
8335 use crate::ln::onion_utils::INVALID_ONION_BLINDING;
8336 use crate::ln::{PaymentHash, PaymentPreimage};
8337 use crate::ln::channel_keys::{RevocationKey, RevocationBasepoint};
8338 use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
8339 use crate::ln::channel::InitFeatures;
8340 use crate::ln::channel::{AwaitingChannelReadyFlags, Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, HTLCUpdateAwaitingACK, commit_tx_fee_msat};
8341 use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
8342 use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
8343 use crate::ln::msgs;
8344 use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
8345 use crate::ln::script::ShutdownScript;
8346 use crate::ln::chan_utils::{self, htlc_success_tx_weight, htlc_timeout_tx_weight};
8347 use crate::chain::BestBlock;
8348 use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
8349 use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
8350 use crate::chain::transaction::OutPoint;
8351 use crate::routing::router::{Path, RouteHop};
8352 use crate::util::config::UserConfig;
8353 use crate::util::errors::APIError;
8354 use crate::util::ser::{ReadableArgs, Writeable};
8355 use crate::util::test_utils;
8356 use crate::util::test_utils::{OnGetShutdownScriptpubkey, TestKeysInterface};
8357 use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
8358 use bitcoin::secp256k1::ffi::Signature as FFISignature;
8359 use bitcoin::secp256k1::{SecretKey,PublicKey};
8360 use bitcoin::hashes::sha256::Hash as Sha256;
8361 use bitcoin::hashes::Hash;
8362 use bitcoin::hashes::hex::FromHex;
8363 use bitcoin::hash_types::WPubkeyHash;
8364 use bitcoin::blockdata::locktime::absolute::LockTime;
8365 use bitcoin::address::{WitnessProgram, WitnessVersion};
8366 use crate::prelude::*;
8368 struct TestFeeEstimator {
8371 impl FeeEstimator for TestFeeEstimator {
8372 fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 {
8378 fn test_max_funding_satoshis_no_wumbo() {
8379 assert_eq!(TOTAL_BITCOIN_SUPPLY_SATOSHIS, 21_000_000 * 100_000_000);
8380 assert!(MAX_FUNDING_SATOSHIS_NO_WUMBO <= TOTAL_BITCOIN_SUPPLY_SATOSHIS,
8381 "MAX_FUNDING_SATOSHIS_NO_WUMBO is greater than all satoshis in existence");
8385 signer: InMemorySigner,
8388 impl EntropySource for Keys {
8389 fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }
8392 impl SignerProvider for Keys {
8393 type EcdsaSigner = InMemorySigner;
8395 type TaprootSigner = InMemorySigner;
8397 fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
8398 self.signer.channel_keys_id()
8401 fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::EcdsaSigner {
8405 fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::EcdsaSigner, DecodeError> { panic!(); }
8407 fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()> {
8408 let secp_ctx = Secp256k1::signing_only();
8409 let channel_monitor_claim_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
8410 let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
8411 Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(channel_monitor_claim_key_hash).into_script())
8414 fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
8415 let secp_ctx = Secp256k1::signing_only();
8416 let channel_close_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
8417 Ok(ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key)))
8421 #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
8422 fn public_from_secret_hex(secp_ctx: &Secp256k1<bitcoin::secp256k1::All>, hex: &str) -> PublicKey {
8423 PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&<Vec<u8>>::from_hex(hex).unwrap()[..]).unwrap())
8427 fn upfront_shutdown_script_incompatibility() {
8428 let features = channelmanager::provided_init_features(&UserConfig::default()).clear_shutdown_anysegwit();
8429 let non_v0_segwit_shutdown_script = ShutdownScript::new_witness_program(
8430 &WitnessProgram::new(WitnessVersion::V16, &[0, 40]).unwrap(),
8433 let seed = [42; 32];
8434 let network = Network::Testnet;
8435 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8436 keys_provider.expect(OnGetShutdownScriptpubkey {
8437 returns: non_v0_segwit_shutdown_script.clone(),
8440 let secp_ctx = Secp256k1::new();
8441 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8442 let config = UserConfig::default();
8443 match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42, None) {
8444 Err(APIError::IncompatibleShutdownScript { script }) => {
8445 assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
8447 Err(e) => panic!("Unexpected error: {:?}", e),
8448 Ok(_) => panic!("Expected error"),
8452 // Check that, during channel creation, we use the same feerate in the open channel message
8453 // as we do in the Channel object creation itself.
8455 fn test_open_channel_msg_fee() {
8456 let original_fee = 253;
8457 let mut fee_est = TestFeeEstimator{fee_est: original_fee };
8458 let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
8459 let secp_ctx = Secp256k1::new();
8460 let seed = [42; 32];
8461 let network = Network::Testnet;
8462 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8464 let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8465 let config = UserConfig::default();
8466 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8468 // Now change the fee so we can check that the fee in the open_channel message is the
8469 // same as the old fee.
8470 fee_est.fee_est = 500;
8471 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8472 assert_eq!(open_channel_msg.feerate_per_kw, original_fee);
8476 fn test_holder_vs_counterparty_dust_limit() {
8477 // Test that when calculating the local and remote commitment transaction fees, the correct
8478 // dust limits are used.
8479 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8480 let secp_ctx = Secp256k1::new();
8481 let seed = [42; 32];
8482 let network = Network::Testnet;
8483 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8484 let logger = test_utils::TestLogger::new();
8485 let best_block = BestBlock::from_network(network);
8487 // Go through the flow of opening a channel between two nodes, making sure
8488 // they have different dust limits.
8490 // Create Node A's channel pointing to Node B's pubkey
8491 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8492 let config = UserConfig::default();
8493 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8495 // Create Node B's channel by receiving Node A's open_channel message
8496 // Make sure A's dust limit is as we expect.
8497 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8498 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8499 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8501 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
8502 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
8503 accept_channel_msg.dust_limit_satoshis = 546;
8504 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8505 node_a_chan.context.holder_dust_limit_satoshis = 1560;
8507 // Node A --> Node B: funding created
8508 let output_script = node_a_chan.context.get_funding_redeemscript();
8509 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8510 value: 10000000, script_pubkey: output_script.clone(),
8512 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8513 let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8514 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8516 // Node B --> Node A: funding signed
8517 let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
8518 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
8520 // Put some inbound and outbound HTLCs in A's channel.
8521 let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
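// A rough sketch of the arithmetic behind 11_092_000 msat, assuming the pre-anchors BOLT 3
// second-stage weights (HTLC-timeout 663 WU, HTLC-success 703 WU) and the 15000 sat/kw feerate above:
//   A's offered-HTLC dust threshold:  1560 + 663 * 15000 / 1000 = 11_505 sat -> 11_092 sat is dust
//   B's received-HTLC dust threshold:  546 + 703 * 15000 / 1000 = 11_091 sat -> 11_092 sat is not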
8522 node_a_chan.context.pending_inbound_htlcs.push(InboundHTLCOutput {
8524 amount_msat: htlc_amount_msat,
8525 payment_hash: PaymentHash(Sha256::hash(&[42; 32]).to_byte_array()),
8526 cltv_expiry: 300000000,
8527 state: InboundHTLCState::Committed,
8530 node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
8532 amount_msat: htlc_amount_msat, // put an amount below A's dust amount but above B's.
8533 payment_hash: PaymentHash(Sha256::hash(&[43; 32]).to_byte_array()),
8534 cltv_expiry: 200000000,
8535 state: OutboundHTLCState::Committed,
8536 source: HTLCSource::OutboundRoute {
8537 path: Path { hops: Vec::new(), blinded_tail: None },
8538 session_priv: SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
8539 first_hop_htlc_msat: 548,
8540 payment_id: PaymentId([42; 32]),
8542 skimmed_fee_msat: None,
8543 blinding_point: None,
8546 // Make sure that when Node A calculates its local commitment transaction, none of the HTLCs clear
8547 // the dust limit check (i.e. they are all trimmed), so the fee matches a zero-HTLC commitment.
8548 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
8549 let local_commit_tx_fee = node_a_chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8550 let local_commit_fee_0_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.get_channel_type());
8551 assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs);
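// For reference, for a non-anchors channel commit_tx_fee_msat should work out to roughly
// (724 + 172 * num_nondust_htlcs) * feerate_per_kw / 1000 sats (expressed in msat) per BOLT 3;
// with every HTLC trimmed as dust here it collapses to the zero-HTLC fee.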
8553 // Finally, make sure that when Node A calculates the remote's commitment transaction fees, all
8554 // of the HTLCs are seen to be above the dust limit.
8555 node_a_chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
8556 let remote_commit_fee_3_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.get_channel_type());
8557 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
8558 let remote_commit_tx_fee = node_a_chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8559 assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs);
8563 fn test_timeout_vs_success_htlc_dust_limit() {
8564 // Make sure that when `next_remote_commit_tx_fee_msat` and `next_local_commit_tx_fee_msat`
8565 // calculate the real dust limits for HTLCs (i.e. the dust limit given by the counterparty
8566 // *plus* the fees paid for the HTLC) they don't swap `HTLC_SUCCESS_TX_WEIGHT` for
8567 // `HTLC_TIMEOUT_TX_WEIGHT`, and vice versa.
8568 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 253 });
8569 let secp_ctx = Secp256k1::new();
8570 let seed = [42; 32];
8571 let network = Network::Testnet;
8572 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8574 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8575 let config = UserConfig::default();
8576 let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8578 let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.get_channel_type());
8579 let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.get_channel_type());
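// Per BOLT 3, on the holder's commitment an HTLC the holder offers is resolved via an HTLC-timeout
// transaction while a received HTLC is resolved via an HTLC-success transaction, so each effective
// dust threshold adds the matching second-stage fee. The cases below sit one satoshi above or below
// those thresholds to catch the weights being swapped.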
8581 // If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped: then this HTLC would be
8582 // counted as dust when it shouldn't be.
8583 let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000;
8584 let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
8585 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8586 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
8588 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
8589 let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000;
8590 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered);
8591 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8592 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
8594 chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
8596 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
8597 let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000;
8598 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
8599 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8600 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
8602 // If swapped: this HTLC would be counted as dust when it shouldn't be.
8603 let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000;
8604 let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered);
8605 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8606 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
8610 fn channel_reestablish_no_updates() {
8611 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8612 let logger = test_utils::TestLogger::new();
8613 let secp_ctx = Secp256k1::new();
8614 let seed = [42; 32];
8615 let network = Network::Testnet;
8616 let best_block = BestBlock::from_network(network);
8617 let chain_hash = ChainHash::using_genesis_block(network);
8618 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8620 // Go through the flow of opening a channel between two nodes.
8622 // Create Node A's channel pointing to Node B's pubkey
8623 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8624 let config = UserConfig::default();
8625 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8627 // Create Node B's channel by receiving Node A's open_channel message
8628 let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
8629 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8630 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8632 // Node B --> Node A: accept channel
8633 let accept_channel_msg = node_b_chan.accept_inbound_channel();
8634 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8636 // Node A --> Node B: funding created
8637 let output_script = node_a_chan.context.get_funding_redeemscript();
8638 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8639 value: 10000000, script_pubkey: output_script.clone(),
8641 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8642 let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8643 let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8645 // Node B --> Node A: funding signed
8646 let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
8647 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
8649 // Now disconnect the two nodes and check that the commitment point in
8650 // Node B's channel_reestablish message is sane.
8651 assert!(node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
8652 let msg = node_b_chan.get_channel_reestablish(&&logger);
8653 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
8654 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
8655 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
8657 // Check that the commitment point in Node A's channel_reestablish message is sane.
8659 assert!(node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
8660 let msg = node_a_chan.get_channel_reestablish(&&logger);
8661 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
8662 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
8663 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
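// These values follow from BOLT 2: right after funding_signed only the initial commitment exists and
// nothing has been revoked, so next_commitment_number is 1, next_revocation_number is 0, and no
// per-commitment secret has been received yet (hence all zeroes).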
8667 fn test_configured_holder_max_htlc_value_in_flight() {
8668 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8669 let logger = test_utils::TestLogger::new();
8670 let secp_ctx = Secp256k1::new();
8671 let seed = [42; 32];
8672 let network = Network::Testnet;
8673 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8674 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8675 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8677 let mut config_2_percent = UserConfig::default();
8678 config_2_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 2;
8679 let mut config_99_percent = UserConfig::default();
8680 config_99_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 99;
8681 let mut config_0_percent = UserConfig::default();
8682 config_0_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 0;
8683 let mut config_101_percent = UserConfig::default();
8684 config_101_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 101;
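// The config documents an allowed range of 1..=100, so the out-of-range values above are expected to
// be clamped at channel creation (0 -> 1%, 101 -> 100%); chan_5 through chan_8 below exercise that.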
8686 // Test that `OutboundV1Channel::new` creates a channel with the correct value for
8687 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
8688 // which is set to the lower bound + 1 (2%) of the `channel_value`.
8689 let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42, None).unwrap();
8690 let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000;
8691 assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64);
8693 // Test with the upper bound - 1 of valid values (99%).
8694 let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42, None).unwrap();
8695 let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000;
8696 assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);
8698 let chan_1_open_channel_msg = chan_1.get_open_channel(ChainHash::using_genesis_block(network));
8700 // Test that `InboundV1Channel::new` creates a channel with the correct value for
8701 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
8702 // which is set to the lower bound + 1 (2%) of the `channel_value`.
8703 let chan_3 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8704 let chan_3_value_msat = chan_3.context.channel_value_satoshis * 1000;
8705 assert_eq!(chan_3.context.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64);
8707 // Test with the upper bound - 1 of valid values (99%).
8708 let chan_4 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8709 let chan_4_value_msat = chan_4.context.channel_value_satoshis * 1000;
8710 assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64);
8712 // Test that `OutboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
8713 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
8714 let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42, None).unwrap();
8715 let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000;
8716 assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64);
8718 // Test that `OutboundV1Channel::new` uses the upper bound of the configurable percentage values
8719 // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value larger than 100.
8721 let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42, None).unwrap();
8722 let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000;
8723 assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);
8725 // Test that `InboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
8726 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
8727 let chan_7 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8728 let chan_7_value_msat = chan_7.context.channel_value_satoshis * 1000;
8729 assert_eq!(chan_7.context.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64);
8731 // Test that `InboundV1Channel::new` uses the upper bound of the configurable percentage values
8732 // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value larger than 100.
8734 let chan_8 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8735 let chan_8_value_msat = chan_8.context.channel_value_satoshis * 1000;
8736 assert_eq!(chan_8.context.holder_max_htlc_value_in_flight_msat, chan_8_value_msat);
8740 fn test_configured_holder_selected_channel_reserve_satoshis() {
8742 // Test that `OutboundV1Channel::new` and `InboundV1Channel::new` create a channel with the correct
8743 // channel reserves, when `their_channel_reserve_proportional_millionths` is configured.
8744 test_self_and_counterparty_channel_reserve(10_000_000, 0.02, 0.02);
8746 // Test with valid but unreasonably high channel reserves
8747 // Requesting and accepting parties have requested 49%-49% and 60%-30% channel reserves
8748 test_self_and_counterparty_channel_reserve(10_000_000, 0.49, 0.49);
8749 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.30);
8751 // Test with calculated channel reserve less than lower bound
8752 // i.e. `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
8753 test_self_and_counterparty_channel_reserve(100_000, 0.00002, 0.30);
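// (With a 100_000 sat channel, 0.002% is only 2 sats, so the reserve should be floored at
// MIN_THEIR_CHAN_RESERVE_SATOSHIS -- 1_000 sats at the time of writing.)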
8755 // Test with invalid channel reserves since the sum of both is greater than or equal to the channel value
8757 test_self_and_counterparty_channel_reserve(10_000_000, 0.50, 0.50);
8758 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.50);
8761 fn test_self_and_counterparty_channel_reserve(channel_value_satoshis: u64, outbound_selected_channel_reserve_perc: f64, inbound_selected_channel_reserve_perc: f64) {
8762 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15_000 });
8763 let logger = test_utils::TestLogger::new();
8764 let secp_ctx = Secp256k1::new();
8765 let seed = [42; 32];
8766 let network = Network::Testnet;
8767 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8768 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8769 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8772 let mut outbound_node_config = UserConfig::default();
8773 outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
8774 let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42, None).unwrap();
8776 let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
8777 assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);
8779 let chan_open_channel_msg = chan.get_open_channel(ChainHash::using_genesis_block(network));
8780 let mut inbound_node_config = UserConfig::default();
8781 inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
8783 if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 {
8784 let chan_inbound_node = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false).unwrap();
8786 let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64);
8788 assert_eq!(chan_inbound_node.context.holder_selected_channel_reserve_satoshis, expected_inbound_selected_chan_reserve);
8789 assert_eq!(chan_inbound_node.context.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve);
8791 // Channel negotiation failed: the combined requested reserves meet or exceed the channel value
8792 let result = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false);
8793 assert!(result.is_err());
8798 fn channel_update() {
8799 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8800 let logger = test_utils::TestLogger::new();
8801 let secp_ctx = Secp256k1::new();
8802 let seed = [42; 32];
8803 let network = Network::Testnet;
8804 let best_block = BestBlock::from_network(network);
8805 let chain_hash = ChainHash::using_genesis_block(network);
8806 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8808 // Create Node A's channel pointing to Node B's pubkey
8809 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8810 let config = UserConfig::default();
8811 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8813 // Create Node B's channel by receiving Node A's open_channel message
8814 // Make sure A's dust limit is as we expect.
8815 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8816 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8817 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8819 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
8820 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
8821 accept_channel_msg.dust_limit_satoshis = 546;
8822 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8823 node_a_chan.context.holder_dust_limit_satoshis = 1560;
8825 // Node A --> Node B: funding created
8826 let output_script = node_a_chan.context.get_funding_redeemscript();
8827 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8828 value: 10000000, script_pubkey: output_script.clone(),
8830 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8831 let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8832 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8834 // Node B --> Node A: funding signed
8835 let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
8836 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
8838 // Make sure that receiving a channel update will update the Channel as expected.
8839 let update = ChannelUpdate {
8840 contents: UnsignedChannelUpdate {
8842 short_channel_id: 0,
8845 cltv_expiry_delta: 100,
8846 htlc_minimum_msat: 5,
8847 htlc_maximum_msat: MAX_VALUE_MSAT,
8849 fee_proportional_millionths: 11,
8850 excess_data: Vec::new(),
8852 signature: Signature::from(unsafe { FFISignature::new() })
8854 assert!(node_a_chan.channel_update(&update).unwrap());
8856 // The counterparty can send an update with a higher minimum HTLC, but that shouldn't
8857 // change our official htlc_minimum_msat.
8858 assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1);
8859 match node_a_chan.context.counterparty_forwarding_info() {
8861 assert_eq!(info.cltv_expiry_delta, 100);
8862 assert_eq!(info.fee_base_msat, 110);
8863 assert_eq!(info.fee_proportional_millionths, 11);
8865 None => panic!("expected counterparty forwarding info to be Some")
8868 assert!(!node_a_chan.channel_update(&update).unwrap());
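// Applying the identical update a second time reports no change: channel_update's bool return
// appears to indicate whether the stored counterparty forwarding info was actually modified.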
8872 fn blinding_point_skimmed_fee_malformed_ser() {
8873 // Ensure that channel blinding points, skimmed fees, and malformed HTLCs are (de)serialized properly.
8875 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8876 let secp_ctx = Secp256k1::new();
8877 let seed = [42; 32];
8878 let network = Network::Testnet;
8879 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8881 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8882 let config = UserConfig::default();
8883 let features = channelmanager::provided_init_features(&config);
8884 let outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8885 let mut chan = Channel { context: outbound_chan.context };
8887 let dummy_htlc_source = HTLCSource::OutboundRoute {
8889 hops: vec![RouteHop {
8890 pubkey: test_utils::pubkey(2), channel_features: ChannelFeatures::empty(),
8891 node_features: NodeFeatures::empty(), short_channel_id: 0, fee_msat: 0,
8892 cltv_expiry_delta: 0, maybe_announced_channel: false,
8896 session_priv: test_utils::privkey(42),
8897 first_hop_htlc_msat: 0,
8898 payment_id: PaymentId([42; 32]),
8900 let dummy_outbound_output = OutboundHTLCOutput {
8903 payment_hash: PaymentHash([43; 32]),
8905 state: OutboundHTLCState::Committed,
8906 source: dummy_htlc_source.clone(),
8907 skimmed_fee_msat: None,
8908 blinding_point: None,
8910 let mut pending_outbound_htlcs = vec![dummy_outbound_output.clone(); 10];
8911 for (idx, htlc) in pending_outbound_htlcs.iter_mut().enumerate() {
8913 htlc.blinding_point = Some(test_utils::pubkey(42 + idx as u8));
8916 htlc.skimmed_fee_msat = Some(1);
8919 chan.context.pending_outbound_htlcs = pending_outbound_htlcs.clone();
8921 let dummy_holding_cell_add_htlc = HTLCUpdateAwaitingACK::AddHTLC {
8924 payment_hash: PaymentHash([43; 32]),
8925 source: dummy_htlc_source.clone(),
8926 onion_routing_packet: msgs::OnionPacket {
8928 public_key: Ok(test_utils::pubkey(1)),
8929 hop_data: [0; 20*65],
8932 skimmed_fee_msat: None,
8933 blinding_point: None,
8935 let dummy_holding_cell_claim_htlc = HTLCUpdateAwaitingACK::ClaimHTLC {
8936 payment_preimage: PaymentPreimage([42; 32]),
8939 let dummy_holding_cell_failed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailHTLC {
8940 htlc_id, err_packet: msgs::OnionErrorPacket { data: vec![42] }
8942 let dummy_holding_cell_malformed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailMalformedHTLC {
8943 htlc_id, failure_code: INVALID_ONION_BLINDING, sha256_of_onion: [0; 32],
8945 let mut holding_cell_htlc_updates = Vec::with_capacity(12);
8948 holding_cell_htlc_updates.push(dummy_holding_cell_add_htlc.clone());
8949 } else if i % 5 == 1 {
8950 holding_cell_htlc_updates.push(dummy_holding_cell_claim_htlc.clone());
8951 } else if i % 5 == 2 {
8952 let mut dummy_add = dummy_holding_cell_add_htlc.clone();
8953 if let HTLCUpdateAwaitingACK::AddHTLC {
8954 ref mut blinding_point, ref mut skimmed_fee_msat, ..
8955 } = &mut dummy_add {
8956 *blinding_point = Some(test_utils::pubkey(42 + i));
8957 *skimmed_fee_msat = Some(42);
8959 holding_cell_htlc_updates.push(dummy_add);
8960 } else if i % 5 == 3 {
8961 holding_cell_htlc_updates.push(dummy_holding_cell_malformed_htlc(i as u64));
8963 holding_cell_htlc_updates.push(dummy_holding_cell_failed_htlc(i as u64));
8966 chan.context.holding_cell_htlc_updates = holding_cell_htlc_updates.clone();
8968 // Encode and decode the channel and ensure that the HTLCs within are the same.
8969 let encoded_chan = chan.encode();
8970 let mut s = crate::io::Cursor::new(&encoded_chan);
8971 let mut reader = crate::util::ser::FixedLengthReader::new(&mut s, encoded_chan.len() as u64);
8972 let features = channelmanager::provided_channel_type_features(&config);
8973 let decoded_chan = Channel::read(&mut reader, (&&keys_provider, &&keys_provider, 0, &features)).unwrap();
8974 assert_eq!(decoded_chan.context.pending_outbound_htlcs, pending_outbound_htlcs);
8975 assert_eq!(decoded_chan.context.holding_cell_htlc_updates, holding_cell_htlc_updates);
8978 #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
8980 fn outbound_commitment_test() {
8981 use bitcoin::sighash;
8982 use bitcoin::consensus::encode::serialize;
8983 use bitcoin::sighash::EcdsaSighashType;
8984 use bitcoin::hashes::hex::FromHex;
8985 use bitcoin::hash_types::Txid;
8986 use bitcoin::secp256k1::Message;
8987 use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, ecdsa::EcdsaChannelSigner};
8988 use crate::ln::PaymentPreimage;
8989 use crate::ln::channel::{HTLCOutputInCommitment, TxCreationKeys};
8990 use crate::ln::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint};
8991 use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
8992 use crate::util::logger::Logger;
8993 use crate::sync::Arc;
8994 use core::str::FromStr;
8995 use hex::DisplayHex;
8997 // Test vectors from BOLT 3 Appendices C and F (anchors):
8998 let feeest = TestFeeEstimator{fee_est: 15000};
8999 let logger : Arc<dyn Logger> = Arc::new(test_utils::TestLogger::new());
9000 let secp_ctx = Secp256k1::new();
9002 let mut signer = InMemorySigner::new(
9004 SecretKey::from_slice(&<Vec<u8>>::from_hex("30ff4956bbdd3222d44cc5e8a1261dab1e07957bdac5ae88fe3261ef321f3749").unwrap()[..]).unwrap(),
9005 SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
9006 SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
9007 SecretKey::from_slice(&<Vec<u8>>::from_hex("3333333333333333333333333333333333333333333333333333333333333333").unwrap()[..]).unwrap(),
9008 SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
9010 // These aren't set in the test vectors:
9011 [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff],
9017 assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..],
9018 <Vec<u8>>::from_hex("023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb").unwrap()[..]);
9019 let keys_provider = Keys { signer: signer.clone() };
9021 let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9022 let mut config = UserConfig::default();
9023 config.channel_handshake_config.announced_channel = false;
9024 let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42, None).unwrap(); // Nothing uses their network key in this test
9025 chan.context.holder_dust_limit_satoshis = 546;
9026 chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in during accept_channel
9028 let funding_info = OutPoint{ txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
9030 let counterparty_pubkeys = ChannelPublicKeys {
9031 funding_pubkey: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
9032 revocation_basepoint: RevocationBasepoint::from(PublicKey::from_slice(&<Vec<u8>>::from_hex("02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27").unwrap()[..]).unwrap()),
9033 payment_point: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"),
9034 delayed_payment_basepoint: DelayedPaymentBasepoint::from(public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13")),
9035 htlc_basepoint: HtlcBasepoint::from(public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"))
9037 chan.context.channel_transaction_parameters.counterparty_parameters = Some(
9038 CounterpartyChannelTransactionParameters {
9039 pubkeys: counterparty_pubkeys.clone(),
9040 selected_contest_delay: 144
9042 chan.context.channel_transaction_parameters.funding_outpoint = Some(funding_info);
9043 signer.provide_channel_parameters(&chan.context.channel_transaction_parameters);
9045 assert_eq!(counterparty_pubkeys.payment_point.serialize()[..],
9046 <Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
9048 assert_eq!(counterparty_pubkeys.funding_pubkey.serialize()[..],
9049 <Vec<u8>>::from_hex("030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c1").unwrap()[..]);
9051 assert_eq!(counterparty_pubkeys.htlc_basepoint.to_public_key().serialize()[..],
9052 <Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
9054 // We can't just use build_holder_transaction_keys here as the per_commitment_secret is not
9055 // derived from a commitment_seed, so instead we copy it here and call
9056 // build_commitment_transaction.
9057 let delayed_payment_base = &chan.context.holder_signer.as_ref().pubkeys().delayed_payment_basepoint;
9058 let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
9059 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
9060 let htlc_basepoint = &chan.context.holder_signer.as_ref().pubkeys().htlc_basepoint;
9061 let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint);
9063 macro_rules! test_commitment {
9064 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
9065 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::only_static_remote_key();
9066 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::only_static_remote_key(), $($remain)*);
9070 macro_rules! test_commitment_with_anchors {
9071 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
9072 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9073 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), $($remain)*);
9077 macro_rules! test_commitment_common {
9078 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $opt_anchors: expr, {
9079 $( { $htlc_idx: expr, $counterparty_htlc_sig_hex: expr, $htlc_sig_hex: expr, $htlc_tx_hex: expr } ), *
9081 let (commitment_tx, htlcs): (_, Vec<HTLCOutputInCommitment>) = {
9082 let mut commitment_stats = chan.context.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, &logger);
9084 let htlcs = commitment_stats.htlcs_included.drain(..)
9085 .filter_map(|(htlc, _)| if htlc.transaction_output_index.is_some() { Some(htlc) } else { None })
9087 (commitment_stats.tx, htlcs)
9089 let trusted_tx = commitment_tx.trust();
9090 let unsigned_tx = trusted_tx.built_transaction();
9091 let redeemscript = chan.context.get_funding_redeemscript();
9092 let counterparty_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_sig_hex).unwrap()[..]).unwrap();
9093 let sighash = unsigned_tx.get_sighash_all(&redeemscript, chan.context.channel_value_satoshis);
9094 log_trace!(logger, "unsigned_tx = {}", serialize(&unsigned_tx.transaction).as_hex());
9095 assert!(secp_ctx.verify_ecdsa(&sighash, &counterparty_signature, chan.context.counterparty_funding_pubkey()).is_ok(), "verify counterparty commitment sig");
9097 let mut per_htlc: Vec<(HTLCOutputInCommitment, Option<Signature>)> = Vec::new();
9098 per_htlc.clear(); // Don't warn about excess mut for no-HTLC calls
9099 let mut counterparty_htlc_sigs = Vec::new();
9100 counterparty_htlc_sigs.clear(); // Don't warn about excess mut for no-HTLC calls
9102 let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
9103 per_htlc.push((htlcs[$htlc_idx].clone(), Some(remote_signature)));
9104 counterparty_htlc_sigs.push(remote_signature);
9106 assert_eq!(htlcs.len(), per_htlc.len());
9108 let holder_commitment_tx = HolderCommitmentTransaction::new(
9109 commitment_tx.clone(),
9110 counterparty_signature,
9111 counterparty_htlc_sigs,
9112 &chan.context.holder_signer.as_ref().pubkeys().funding_pubkey,
9113 chan.context.counterparty_funding_pubkey()
9115 let holder_sig = signer.sign_holder_commitment(&holder_commitment_tx, &secp_ctx).unwrap();
9116 assert_eq!(Signature::from_der(&<Vec<u8>>::from_hex($sig_hex).unwrap()[..]).unwrap(), holder_sig, "holder_sig");
9118 let funding_redeemscript = chan.context.get_funding_redeemscript();
9119 let tx = holder_commitment_tx.add_holder_sig(&funding_redeemscript, holder_sig);
9120 assert_eq!(serialize(&tx)[..], <Vec<u8>>::from_hex($tx_hex).unwrap()[..], "tx");
9122 // ((htlc, counterparty_sig), (index, holder_sig))
9123 let mut htlc_counterparty_sig_iter = holder_commitment_tx.counterparty_htlc_sigs.iter();
9126 log_trace!(logger, "verifying htlc {}", $htlc_idx);
9127 let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
9129 let ref htlc = htlcs[$htlc_idx];
9130 let mut htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw,
9131 chan.context.get_counterparty_selected_contest_delay().unwrap(),
9132 &htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
9133 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
9134 let htlc_sighashtype = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
9135 let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
9136 assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key.to_public_key()).is_ok(), "verify counterparty htlc sig");
9138 let mut preimage: Option<PaymentPreimage> = None;
9141 let out = PaymentHash(Sha256::hash(&[i; 32]).to_byte_array());
9142 if out == htlc.payment_hash {
9143 preimage = Some(PaymentPreimage([i; 32]));
9147 assert!(preimage.is_some());
9150 let htlc_counterparty_sig = htlc_counterparty_sig_iter.next().unwrap();
9151 let htlc_holder_sig = signer.sign_holder_htlc_transaction(&htlc_tx, 0, &HTLCDescriptor {
9152 channel_derivation_parameters: ChannelDerivationParameters {
9153 value_satoshis: chan.context.channel_value_satoshis,
9154 keys_id: chan.context.channel_keys_id,
9155 transaction_parameters: chan.context.channel_transaction_parameters.clone(),
9157 commitment_txid: trusted_tx.txid(),
9158 per_commitment_number: trusted_tx.commitment_number(),
9159 per_commitment_point: trusted_tx.per_commitment_point(),
9160 feerate_per_kw: trusted_tx.feerate_per_kw(),
9162 preimage: preimage.clone(),
9163 counterparty_sig: *htlc_counterparty_sig,
9164 }, &secp_ctx).unwrap();
9165 let num_anchors = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { 2 } else { 0 };
9166 assert_eq!(htlc.transaction_output_index, Some($htlc_idx + num_anchors), "output index");
9168 let signature = Signature::from_der(&<Vec<u8>>::from_hex($htlc_sig_hex).unwrap()[..]).unwrap();
9169 assert_eq!(signature, htlc_holder_sig, "htlc sig");
9170 let trusted_tx = holder_commitment_tx.trust();
9171 htlc_tx.input[0].witness = trusted_tx.build_htlc_input_witness($htlc_idx, htlc_counterparty_sig, &htlc_holder_sig, &preimage);
9172 log_trace!(logger, "htlc_tx = {}", serialize(&htlc_tx).as_hex());
9173 assert_eq!(serialize(&htlc_tx)[..], <Vec<u8>>::from_hex($htlc_tx_hex).unwrap()[..], "htlc tx");
9175 assert!(htlc_counterparty_sig_iter.next().is_none());
9179 // anchors: simple commitment tx with no HTLCs and single anchor
9180 test_commitment_with_anchors!("30440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a8658",
9181 "3044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778",
9182 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f10529800000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778014730440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a865801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9184 // simple commitment tx with no HTLCs
9185 chan.context.value_to_self_msat = 7000000000;
9187 test_commitment!("3045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b0",
9188 "30440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae055647142",
9189 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48454a56a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae05564714201483045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9191 // anchors: simple commitment tx with no HTLCs
9192 test_commitment_with_anchors!("3045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f3",
9193 "30450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f7",
9194 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a508b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f701483045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9196 chan.context.pending_inbound_htlcs.push({
9197 let mut out = InboundHTLCOutput{
9199 amount_msat: 1000000,
9201 payment_hash: PaymentHash([0; 32]),
9202 state: InboundHTLCState::Committed,
9204 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap()).to_byte_array();
9207 chan.context.pending_inbound_htlcs.push({
9208 let mut out = InboundHTLCOutput{
9210 amount_msat: 2000000,
9212 payment_hash: PaymentHash([0; 32]),
9213 state: InboundHTLCState::Committed,
9215 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
9218 chan.context.pending_outbound_htlcs.push({
9219 let mut out = OutboundHTLCOutput{
9221 amount_msat: 2000000,
9223 payment_hash: PaymentHash([0; 32]),
9224 state: OutboundHTLCState::Committed,
9225 source: HTLCSource::dummy(),
9226 skimmed_fee_msat: None,
9227 blinding_point: None,
9229 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).to_byte_array();
9232 chan.context.pending_outbound_htlcs.push({
9233 let mut out = OutboundHTLCOutput{
9235 amount_msat: 3000000,
9237 payment_hash: PaymentHash([0; 32]),
9238 state: OutboundHTLCState::Committed,
9239 source: HTLCSource::dummy(),
9240 skimmed_fee_msat: None,
9241 blinding_point: None,
9243 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).to_byte_array();
9246 chan.context.pending_inbound_htlcs.push({
9247 let mut out = InboundHTLCOutput{
9249 amount_msat: 4000000,
9251 payment_hash: PaymentHash([0; 32]),
9252 state: InboundHTLCState::Committed,
9254 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0404040404040404040404040404040404040404040404040404040404040404").unwrap()).to_byte_array();
9258 // commitment tx with all five HTLCs untrimmed (minimum feerate)
9259 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9260 chan.context.feerate_per_kw = 0;
9262 test_commitment!("3044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e5",
9263 "304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea",
9264 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e0a06a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea01473044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9267 "3045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b",
9268 "30440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce",
9269 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b00000000000000000001e8030000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b014730440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
9272 "30440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f896004",
9273 "3045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f",
9274 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b01000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f89600401483045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9277 "30440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d43352",
9278 "3045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa",
9279 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b02000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d4335201483045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9282 "304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c1363",
9283 "304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac7487",
9284 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b03000000000000000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c13630147304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac748701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9287 "3044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b87",
9288 "3045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95",
9289 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b04000000000000000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b8701483045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9292 // commitment tx with seven outputs untrimmed (maximum feerate)
9293 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9294 chan.context.feerate_per_kw = 647;
9296 test_commitment!("3045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee",
9297 "30450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb7",
9298 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e09c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb701483045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9301 "30450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f",
9302 "30440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b",
9303 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f014730440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
9306 "304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c",
9307 "30450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f",
9308 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0100000000000000000124060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c014830450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9311 "30440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c8",
9312 "3045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673",
9313 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe020000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c801483045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9316 "304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c64",
9317 "3045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee",
9318 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe030000000000000000010c0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c6401483045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9321 "30450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca",
9322 "3044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9",
9323 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe04000000000000000001da0d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca01473044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
// commitment tx with six outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 648;
9330 test_commitment!("304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e5",
9331 "3045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e9",
9332 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4844e9d6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e90147304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9335 "3045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e0",
9336 "304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec6",
9337 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10000000000000000000123060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e00148304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9340 "304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d384124",
9341 "3044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae",
9342 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10100000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d38412401473044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9345 "304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc625532989",
9346 "3045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e2260796",
9347 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf1020000000000000000010b0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc62553298901483045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e226079601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9350 "3044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be",
9351 "3045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6",
9352 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf103000000000000000001d90d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be01483045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
// anchors: commitment tx with six outputs untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 645;
chan.context.holder_dust_limit_satoshis = 1001;
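// With anchors_zero_fee_htlc_tx the second-stage HTLC transactions carry no fee, so
// (per BOLT #3) an HTLC output is trimmed purely on the dust limit. The feerate here
// (645) is below the non-anchor trim point above; it is the raised 1001 sat dust
// limit that should account for one fewer HTLC output than the seven-output case.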
9360 test_commitment_with_anchors!("3044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc312",
9361 "3045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d0051",
9362 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80084a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837ead007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5eb80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994abc996a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d005101473044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc31201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9365 "3045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc282",
9366 "3045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef09",
9367 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320002000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc28283483045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef0901008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000" },
9370 "3045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e",
9371 "3045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac",
9372 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320003000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e83483045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
9375 "3044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c1",
9376 "304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e",
9377 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320004000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c18347304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
9380 "3044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc615",
9381 "3045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226",
9382 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320005000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc61583483045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
// commitment tx with six outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2069;
chan.context.holder_dust_limit_satoshis = 546;
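// 2069 sat/kW appears to be the largest feerate at which six outputs survive with
// these HTLC amounts and a 546 sat dust limit; the next vector bumps the feerate to
// 2070 and the commitment drops to five outputs.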
9390 test_commitment!("304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc",
9391 "3045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e33",
9392 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48477956a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e330148304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9395 "3045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f699",
9396 "3045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d",
9397 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0000000000000000000175020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f69901483045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9400 "3045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df",
9401 "3045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61",
9402 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0100000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df01483045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9405 "3045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e0",
9406 "3044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c18",
9407 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d020000000000000000015d060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e001473044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c1801008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9410 "304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df",
9411 "3045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33",
9412 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d03000000000000000001f2090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df01483045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
// commitment tx with five outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2070;
9419 test_commitment!("304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c",
9420 "3044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c1",
9421 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484da966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c10147304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9424 "304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b",
9425 "30440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e5",
9426 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff0000000000000000000174020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b014730440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e501008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9429 "3045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546",
9430 "30450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c6",
9431 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff010000000000000000015c060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546014830450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9434 "3045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504",
9435 "30440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502",
9436 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff02000000000000000001f1090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504014730440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
// commitment tx with five outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2194;
9443 test_commitment!("304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb3",
9444 "3044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d398",
9445 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48440966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d3980147304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9448 "3045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de8450",
9449 "304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e0154301",
9450 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de84500148304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e015430101008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9453 "3044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a0",
9454 "3045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b77",
9455 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd010000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a001483045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b7701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9458 "3045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b",
9459 "30450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3",
9460 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd020000000000000000019a090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b014830450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
// commitment tx with four outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2195;
9467 test_commitment!("304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce403",
9468 "3044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d1767",
9469 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484b8976a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d17670147304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce40301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9472 "3045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e",
9473 "3045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d76",
9474 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900000000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e01483045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d7601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9477 "3045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a",
9478 "3044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82",
9479 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900100000000000000000199090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a01473044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
// anchors: commitment tx with four outputs untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2185;
chan.context.holder_dust_limit_satoshis = 2001;
let cached_channel_type = chan.context.channel_type;
chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9489 test_commitment_with_anchors!("3044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c0",
9490 "3045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd5",
9491 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80064a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994b80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ac5916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd501473044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9494 "304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb",
9495 "30440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f",
9496 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc02000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb834730440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
9499 "3045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd7271",
9500 "3045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688",
9501 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc03000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd727183483045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
// commitment tx with four outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 3702;
chan.context.holder_dust_limit_satoshis = 546;
chan.context.channel_type = cached_channel_type.clone();
9510 test_commitment!("304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee40169",
9511 "3045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf75",
9512 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4846f916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf750148304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee4016901475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9515 "304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb89",
9516 "304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f",
9517 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb890147304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9520 "3044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb5817",
9521 "304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc",
9522 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0100000000000000000176050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb58170147304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
// commitment tx with three outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 3703;
9529 test_commitment!("3045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e",
9530 "304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e1",
9531 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484eb936a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e101483045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9534 "3045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a",
9535 "3045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05",
9536 "0200000000010120060e4a29579d429f0f27c17ee5f1ee282f20d706d6f90b63d35946d8f3029a0000000000000000000175050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a01483045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
// anchors: commitment tx with three outputs untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 3687;
chan.context.holder_dust_limit_satoshis = 3001;
chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9545 test_commitment_with_anchors!("3045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c228377",
9546 "3045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d",
9547 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80054a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aa28b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d01483045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c22837701475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9550 "3044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c",
9551 "3045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de",
9552 "02000000000101542562b326c08e3a076d9cfca2be175041366591da334d8d513ff1686fd95a6002000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c83483045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
// commitment tx with three outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 4914;
chan.context.holder_dust_limit_satoshis = 546;
chan.context.channel_type = cached_channel_type.clone();
9561 test_commitment!("3045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c95244",
9562 "3045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c19",
9563 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484ae8f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c1901483045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c9524401475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9566 "3045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374",
9567 "30440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10",
9568 "02000000000101a9172908eace869cc35128c31fc2ab502f72e4dff31aab23e0244c4b04b11ab00000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374014730440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
// commitment tx with two outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 4915;
chan.context.holder_dust_limit_satoshis = 546;
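// At 4915 sat/kW every remaining HTLC output should fall below the trim threshold,
// leaving only the to_local and to_remote outputs (hence no per-HTLC vectors below).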
9576 test_commitment!("304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a720",
9577 "30450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf5",
9578 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484fa926a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf50147304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a72001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
// anchors: commitment tx with two outputs untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 4894;
chan.context.holder_dust_limit_satoshis = 4001;
chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9586 test_commitment_with_anchors!("3045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b95",
9587 "30450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd",
9588 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ad0886a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd01483045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b9501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
// commitment tx with two outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 9651180;
chan.context.holder_dust_limit_satoshis = 546;
chan.context.channel_type = cached_channel_type.clone();
9596 test_commitment!("304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd3",
9597 "3045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de",
9598 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b800222020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80ec0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4840400483045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de0147304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
// commitment tx with one output untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 9651181;
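// In the 9651180 sat/kW vector above, to_local sits right at the 546 sat dust limit;
// one more sat/kW of fee should push it below the limit, so only the counterparty's
// to_remote output remains untrimmed here.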
9604 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
9605 "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
9606 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9608 // anchors: commitment tx with one output untrimmed (minimum dust limit)
9609 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9610 chan.context.feerate_per_kw = 6216010;
9611 chan.context.holder_dust_limit_satoshis = 4001;
9612 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9614 test_commitment_with_anchors!("30450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf",
9615 "30450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1",
9616 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a04004830450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1014830450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9618 // commitment tx with fee greater than funder amount
9619 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9620 chan.context.feerate_per_kw = 9651936;
9621 chan.context.holder_dust_limit_satoshis = 546;
9622 chan.context.channel_type = cached_channel_type;
9624 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
9625 "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
9626 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9628 // commitment tx with 3 htlc outputs, 2 offered having the same amount and preimage
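// Note (descriptive comment, not from the original source): two of the offered HTLCs share the
// same amount and payment hash, so their outputs are identical; this exercises the BOLT 3 rule
// that identical HTLC outputs are ordered by increasing CLTV expiry.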
9629 chan.context.value_to_self_msat = 7_000_000_000 - 2_000_000;
9630 chan.context.feerate_per_kw = 253;
9631 chan.context.pending_inbound_htlcs.clear();
9632 chan.context.pending_inbound_htlcs.push({
9633 let mut out = InboundHTLCOutput{
9635 amount_msat: 2000000,
9637 payment_hash: PaymentHash([0; 32]),
9638 state: InboundHTLCState::Committed,
9640 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
9643 chan.context.pending_outbound_htlcs.clear();
9644 chan.context.pending_outbound_htlcs.push({
9645 let mut out = OutboundHTLCOutput{
9647 amount_msat: 5000001,
9649 payment_hash: PaymentHash([0; 32]),
9650 state: OutboundHTLCState::Committed,
9651 source: HTLCSource::dummy(),
9652 skimmed_fee_msat: None,
9653 blinding_point: None,
9655 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
9658 chan.context.pending_outbound_htlcs.push({
9659 let mut out = OutboundHTLCOutput{
9661 amount_msat: 5000000,
9663 payment_hash: PaymentHash([0; 32]),
9664 state: OutboundHTLCState::Committed,
9665 source: HTLCSource::dummy(),
9666 skimmed_fee_msat: None,
9667 blinding_point: None,
9669 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
9673 test_commitment!("304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c8",
9674 "304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c",
9675 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2d8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121b8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121bc0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484a69f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c0147304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9678 "3045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce5",
9679 "3044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b",
9680 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec000000000000000000011f070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce501473044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9682 "3045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a",
9683 "3045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d",
9684 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec01000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a01483045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868f9010000" },
9686 "30440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc",
9687 "304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb",
9688 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec02000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc0147304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" }
9691 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9692 test_commitment_with_anchors!("3044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c725",
9693 "3045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b76",
9694 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aad9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b7601473044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c72501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9697 "30440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c2",
9698 "304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424",
9699 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c402000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c28347304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
9701 "304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c",
9702 "304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b",
9703 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40300000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c8347304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568f9010000" },
9705 "3045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a1",
9706 "3045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b",
9707 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40400000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a183483045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568fa010000" }
9712 fn test_per_commitment_secret_gen() {
9713 // Test vectors from BOLT 3 Appendix D:
9715 let mut seed = [0; 32];
9716 seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap());
9717 assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
9718 <Vec<u8>>::from_hex("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap()[..]);
9720 seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap());
9721 assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
9722 <Vec<u8>>::from_hex("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()[..]);
9724 assert_eq!(chan_utils::build_commitment_secret(&seed, 0xaaaaaaaaaaa),
9725 <Vec<u8>>::from_hex("56f4008fb007ca9acf0e15b054d5c9fd12ee06cea347914ddbaed70d1c13a528").unwrap()[..]);
9727 assert_eq!(chan_utils::build_commitment_secret(&seed, 0x555555555555),
9728 <Vec<u8>>::from_hex("9015daaeb06dba4ccc05b91b2f73bd54405f2be9f217fbacd3c5ac2e62327d31").unwrap()[..]);
9730 seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap());
9731 assert_eq!(chan_utils::build_commitment_secret(&seed, 1),
9732 <Vec<u8>>::from_hex("915c75942a26bb3a433a8ce2cb0427c29ec6c1775cfc78328b57f6ba7bfeaa9c").unwrap()[..]);
9736 fn test_key_derivation() {
9737 // Test vectors from BOLT 3 Appendix E:
9738 let secp_ctx = Secp256k1::new();
9740 let base_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f").unwrap()[..]).unwrap();
9741 let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
9743 let base_point = PublicKey::from_secret_key(&secp_ctx, &base_secret);
9744 assert_eq!(base_point.serialize()[..], <Vec<u8>>::from_hex("036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2").unwrap()[..]);
9746 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
9747 assert_eq!(per_commitment_point.serialize()[..], <Vec<u8>>::from_hex("025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486").unwrap()[..]);
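// Per BOLT 3, the non-revocation keys are derived by tweaking a basepoint with
// SHA256(per_commitment_point || basepoint), i.e.
// privkey = basepoint_secret + SHA256(per_commitment_point || basepoint) mod n.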
9749 assert_eq!(chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret),
9750 SecretKey::from_slice(&<Vec<u8>>::from_hex("cbced912d3b21bf196a766651e436aff192362621ce317704ea2f75d87e7be0f").unwrap()[..]).unwrap());
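// The revocation key blinds both parties' contributions:
// revocation_pubkey = revocation_basepoint * SHA256(revocation_basepoint || per_commitment_point)
//                   + per_commitment_point * SHA256(per_commitment_point || revocation_basepoint),
// so the matching private key only becomes computable once the per-commitment secret is revealed.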
9752 assert_eq!(RevocationKey::from_basepoint(&secp_ctx, &RevocationBasepoint::from(base_point), &per_commitment_point).to_public_key().serialize()[..],
9753 <Vec<u8>>::from_hex("02916e326636d19c33f13e8c0c3a03dd157f332f3e99c317c141dd865eb01f8ff0").unwrap()[..]);
9755 assert_eq!(chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_secret, &base_secret),
9756 SecretKey::from_slice(&<Vec<u8>>::from_hex("d09ffff62ddb2297ab000cc85bcb4283fdeb6aa052affbc9dddcf33b61078110").unwrap()[..]).unwrap());
9760 fn test_zero_conf_channel_type_support() {
9761 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9762 let secp_ctx = Secp256k1::new();
9763 let seed = [42; 32];
9764 let network = Network::Testnet;
9765 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9766 let logger = test_utils::TestLogger::new();
9768 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9769 let config = UserConfig::default();
9770 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
9771 node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
9773 let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
9774 channel_type_features.set_zero_conf_required();
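// Request zero-conf support explicitly via the `channel_type` field of the `open_channel` message.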
9776 let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
9777 open_channel_msg.channel_type = Some(channel_type_features);
9778 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9779 let res = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
9780 node_b_node_id, &channelmanager::provided_channel_type_features(&config),
9781 &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false);
9782 assert!(res.is_ok());
9786 fn test_supports_anchors_zero_htlc_tx_fee() {
9787 // Tests that if both sides support and negotiate `anchors_zero_fee_htlc_tx`, it is the
9788 // resulting `channel_type`.
9789 let secp_ctx = Secp256k1::new();
9790 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9791 let network = Network::Testnet;
9792 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
9793 let logger = test_utils::TestLogger::new();
9795 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
9796 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
9798 let mut config = UserConfig::default();
9799 config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
9801 // It is not enough for just the initiator to signal `option_anchors_zero_fee_htlc_tx`, both
9802 // need to signal it.
9803 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9804 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9805 &channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42,
9806 &config, 0, 42, None
9808 assert!(!channel_a.context.channel_type.supports_anchors_zero_fee_htlc_tx());
9810 let mut expected_channel_type = ChannelTypeFeatures::empty();
9811 expected_channel_type.set_static_remote_key_required();
9812 expected_channel_type.set_anchors_zero_fee_htlc_tx_required();
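// Negotiated channel types are expressed with the required (even) feature bits, hence the
// `*_required` setters above.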
9814 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9815 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9816 &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
9820 let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9821 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
9822 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9823 &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
9824 &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9827 assert_eq!(channel_a.context.channel_type, expected_channel_type);
9828 assert_eq!(channel_b.context.channel_type, expected_channel_type);
9832 fn test_rejects_implicit_simple_anchors() {
9833 // Tests that if `option_anchors` is being negotiated implicitly through the intersection of
9834 // each side's `InitFeatures`, it is rejected.
9835 let secp_ctx = Secp256k1::new();
9836 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9837 let network = Network::Testnet;
9838 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
9839 let logger = test_utils::TestLogger::new();
9841 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
9842 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
9844 let config = UserConfig::default();
9846 // See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
9847 let static_remote_key_required: u64 = 1 << 12;
9848 let simple_anchors_required: u64 = 1 << 20;
9849 let raw_init_features = static_remote_key_required | simple_anchors_required;
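// OR-ing the even (required) bit of each feature pair yields a raw bitmask equivalent to a
// peer that requires `option_static_remotekey` and the legacy `option_anchors`.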
9850 let init_features_with_simple_anchors = InitFeatures::from_le_bytes(raw_init_features.to_le_bytes().to_vec());
9852 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9853 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9854 &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
9858 // Set `channel_type` to `None` to force the implicit feature negotiation.
9859 let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9860 open_channel_msg.channel_type = None;
9862 // Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
9863 // `static_remote_key`, it will fail the channel.
9864 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
9865 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9866 &channelmanager::provided_channel_type_features(&config), &init_features_with_simple_anchors,
9867 &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9869 assert!(channel_b.is_err());
9873 fn test_rejects_simple_anchors_channel_type() {
9874 // Tests that if `option_anchors` is being negotiated through the `channel_type` feature, it is rejected.
9876 let secp_ctx = Secp256k1::new();
9877 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9878 let network = Network::Testnet;
9879 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
9880 let logger = test_utils::TestLogger::new();
9882 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
9883 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
9885 let config = UserConfig::default();
9887 // See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
9888 let static_remote_key_required: u64 = 1 << 12;
9889 let simple_anchors_required: u64 = 1 << 20;
9890 let simple_anchors_raw_features = static_remote_key_required | simple_anchors_required;
9891 let simple_anchors_init = InitFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
9892 let simple_anchors_channel_type = ChannelTypeFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
9893 assert!(!simple_anchors_init.requires_unknown_bits());
9894 assert!(!simple_anchors_channel_type.requires_unknown_bits());
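// Sanity check: both encodings contain only feature bits LDK knows about, so the rejections
// below stem from the channel type itself rather than unknown-feature handling.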
9896 // First, we'll try to open a channel between A and B where A requests a channel type for
9897 // the original `option_anchors` feature (non zero fee htlc tx). This should be rejected by
9898 // B as it's not supported by LDK.
9899 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9900 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9901 &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
9905 let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9906 open_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());
9908 let res = InboundV1Channel::<&TestKeysInterface>::new(
9909 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9910 &channelmanager::provided_channel_type_features(&config), &simple_anchors_init,
9911 &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9913 assert!(res.is_err());
9915 // Then, we'll try to open another channel where A requests a channel type for
9916 // `anchors_zero_fee_htlc_tx`. B is malicious and tries to downgrade the channel type to the
9917 // original `option_anchors` feature, which should be rejected by A as it's not supported by LDK.
9919 let mut channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9920 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init,
9921 10000000, 100000, 42, &config, 0, 42, None
9924 let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9926 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
9927 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9928 &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
9929 &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9932 let mut accept_channel_msg = channel_b.get_accept_channel_message();
9933 accept_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());
9935 let res = channel_a.accept_channel(
9936 &accept_channel_msg, &config.channel_handshake_limits, &simple_anchors_init
9938 assert!(res.is_err());
9942 fn test_waiting_for_batch() {
9943 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9944 let logger = test_utils::TestLogger::new();
9945 let secp_ctx = Secp256k1::new();
9946 let seed = [42; 32];
9947 let network = Network::Testnet;
9948 let best_block = BestBlock::from_network(network);
9949 let chain_hash = ChainHash::using_genesis_block(network);
9950 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9952 let mut config = UserConfig::default();
9953 // Set trust_own_funding_0conf while ensuring we don't send channel_ready for a
9954 // channel in a batch before all channels are ready.
9955 config.channel_handshake_limits.trust_own_funding_0conf = true;
9957 // Create a channel from node a to node b that will be part of batch funding.
9958 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9959 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(
9964 &channelmanager::provided_init_features(&config),
9974 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
9975 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9976 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(
9981 &channelmanager::provided_channel_type_features(&config),
9982 &channelmanager::provided_init_features(&config),
9988 true, // Allow node b to send a 0conf channel_ready.
9991 let accept_channel_msg = node_b_chan.accept_inbound_channel();
9992 node_a_chan.accept_channel(
9993 &accept_channel_msg,
9994 &config.channel_handshake_limits,
9995 &channelmanager::provided_init_features(&config),
9998 // Fund the channel with a batch funding transaction.
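// The transaction carries one output per channel in the batch; the second output below stands
// in for the other channel sharing this funding transaction.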
9999 let output_script = node_a_chan.context.get_funding_redeemscript();
10000 let tx = Transaction {
10002 lock_time: LockTime::ZERO,
10006 value: 10000000, script_pubkey: output_script.clone(),
10009 value: 10000000, script_pubkey: Builder::new().into_script(),
10012 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
10013 let funding_created_msg = node_a_chan.get_funding_created(
10014 tx.clone(), funding_outpoint, true, &&logger,
10015 ).map_err(|_| ()).unwrap();
10016 let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(
10017 &funding_created_msg.unwrap(),
10021 ).map_err(|_| ()).unwrap();
10022 let node_b_updates = node_b_chan.monitor_updating_restored(
10030 // Receive funding_signed, but the channel will be configured to hold sending channel_ready and
10031 // broadcasting the funding transaction until the batch is ready.
10032 let res = node_a_chan.funding_signed(
10033 &funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger,
10035 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
10036 let node_a_updates = node_a_chan.monitor_updating_restored(
10043 // Our channel_ready shouldn't be sent yet, even with trust_own_funding_0conf set,
10044 // as the funding transaction depends on all channels in the batch becoming ready.
10045 assert!(node_a_updates.channel_ready.is_none());
10046 assert!(node_a_updates.funding_broadcastable.is_none());
10047 assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
10049 // It is possible to receive a 0conf channel_ready from the remote node.
10050 node_a_chan.channel_ready(
10051 &node_b_updates.channel_ready.unwrap(),
10059 node_a_chan.context.channel_state,
10060 ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH | AwaitingChannelReadyFlags::THEIR_CHANNEL_READY)
10063 // The WAITING_FOR_BATCH flag is only cleared once the ChannelManager calls set_batch_ready.
10064 node_a_chan.set_batch_ready();
10065 assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY));
10066 assert!(node_a_chan.check_get_channel_ready(0).is_some());