// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.
use bitcoin::blockdata::constants::ChainHash;
use bitcoin::blockdata::script::{Script, ScriptBuf, Builder};
use bitcoin::blockdata::transaction::Transaction;
use bitcoin::sighash::EcdsaSighashType;
use bitcoin::consensus::encode;

use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::sha256d::Hash as Sha256d;
use bitcoin::hash_types::{Txid, BlockHash};

use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
use bitcoin::secp256k1::{PublicKey,SecretKey};
use bitcoin::secp256k1::{Secp256k1,ecdsa::Signature};
use bitcoin::secp256k1;

use crate::ln::{ChannelId, PaymentPreimage, PaymentHash};
use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
use crate::ln::msgs;
use crate::ln::msgs::DecodeError;
use crate::ln::script::{self, ShutdownScript};
use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT, ChannelShutdownState};
use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
use crate::ln::chan_utils;
use crate::ln::onion_utils::HTLCFailReason;
use crate::chain::BestBlock;
use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
use crate::chain::transaction::{OutPoint, TransactionData};
use crate::sign::ecdsa::{EcdsaChannelSigner, WriteableEcdsaChannelSigner};
use crate::sign::{EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
use crate::events::ClosureReason;
use crate::routing::gossip::NodeId;
use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
use crate::util::logger::{Logger, Record, WithContext};
use crate::util::errors::APIError;
use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure};
use crate::util::scid_utils::scid_from_parts;

use crate::prelude::*;
use core::{cmp,mem,fmt};
use core::convert::TryInto;
use core::ops::Deref;
#[cfg(any(test, fuzzing, debug_assertions))]
use crate::sync::Mutex;
use crate::sign::type_resolver::ChannelSignerType;

use super::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint, RevocationBasepoint};
#[cfg(test)]
pub struct ChannelValueStat {
	pub value_to_self_msat: u64,
	pub channel_value_msat: u64,
	pub channel_reserve_msat: u64,
	pub pending_outbound_htlcs_amount_msat: u64,
	pub pending_inbound_htlcs_amount_msat: u64,
	pub holding_cell_outbound_amount_msat: u64,
	pub counterparty_max_htlc_value_in_flight_msat: u64, // outgoing
	pub counterparty_dust_limit_msat: u64,
}

pub struct AvailableBalances {
	/// The amount that would go to us if we close the channel, ignoring any on-chain fees.
	pub balance_msat: u64,
	/// Total amount available for our counterparty to send to us.
	pub inbound_capacity_msat: u64,
	/// Total amount available for us to send to our counterparty.
	pub outbound_capacity_msat: u64,
	/// The maximum value we can assign to the next outbound HTLC
	pub next_outbound_htlc_limit_msat: u64,
	/// The minimum value we can assign to the next outbound HTLC
	pub next_outbound_htlc_minimum_msat: u64,
}
#[derive(Debug, Clone, Copy, PartialEq)]
enum FeeUpdateState {
	// Inbound states mirroring InboundHTLCState
	RemoteAnnounced,
	AwaitingRemoteRevokeToAnnounce,
	// Note that we do not have a AwaitingAnnouncedRemoteRevoke variant here as it is universally
	// handled the same as `Committed`, with the only exception in `InboundHTLCState` being the
	// distinction of when we allow ourselves to forward the HTLC. Because we aren't "forwarding"
	// the fee update anywhere, we can simply consider the fee update `Committed` immediately
	// instead of setting it to AwaitingAnnouncedRemoteRevoke.

	// Outbound state can only be `LocalAnnounced` or `Committed`
	Outbound,
}

enum InboundHTLCRemovalReason {
	FailRelay(msgs::OnionErrorPacket),
	FailMalformed(([u8; 32], u16)),
	Fulfill(PaymentPreimage),
}
enum InboundHTLCState {
	/// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an
	/// update_add_htlc message for this HTLC.
	RemoteAnnounced(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've
	/// revoke_and_ack'd it), but the remote hasn't yet revoked their previous
	/// state (see the example below). We have not yet included this HTLC in a
	/// commitment_signed message because we are waiting on the remote's
	/// aforementioned state revocation. One reason this missing remote RAA
	/// (revoke_and_ack) blocks us from constructing a commitment_signed message
	/// is because every time we create a new "state", i.e. every time we sign a
	/// new commitment tx (see [BOLT #2]), we need a new per_commitment_point,
	/// which is provided one-at-a-time in each RAA. E.g., the last RAA they
	/// sent provided the per_commitment_point for our current commitment tx.
	/// The other reason we should not send a commitment_signed without their RAA
	/// is because their RAA serves to ACK our previous commitment_signed.
	///
	/// Here's an example of how an HTLC could come to be in this state:
	/// remote --> update_add_htlc(prev_htlc) --> local
	/// remote --> commitment_signed(prev_htlc) --> local
	/// remote <-- revoke_and_ack <-- local
	/// remote <-- commitment_signed(prev_htlc) <-- local
	/// [note that here, the remote does not respond with a RAA]
	/// remote --> update_add_htlc(this_htlc) --> local
	/// remote --> commitment_signed(prev_htlc, this_htlc) --> local
	/// Now `this_htlc` will be assigned this state. It's unable to be officially
	/// accepted, i.e. included in a commitment_signed, because we're missing the
	/// RAA that provides our next per_commitment_point. The per_commitment_point
	/// is used to derive commitment keys, which are used to construct the
	/// signatures in a commitment_signed message.
	/// Implies AwaitingRemoteRevoke.
	///
	/// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
	AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've revoke_and_ack'd it).
	/// We have also included this HTLC in our latest commitment_signed and are now just waiting
	/// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the
	/// channel (before it can then get forwarded and/or removed).
	/// Implies AwaitingRemoteRevoke.
	AwaitingAnnouncedRemoteRevoke(PendingHTLCStatus),
	Committed,
	/// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we'll drop it.
	/// Note that we have to keep an eye on the HTLC until we've received a broadcastable
	/// commitment transaction without it as otherwise we'll have to force-close the channel to
	/// claim it before the timeout (obviously doesn't apply to revoked HTLCs that we can't claim
	/// anyway). That said, ChannelMonitor does this for us (see
	/// ChannelMonitor::should_broadcast_holder_commitment_txn) so we actually remove the HTLC from
	/// our own local state before then, once we're sure that the next commitment_signed and
	/// ChannelMonitor::provide_latest_local_commitment_tx will not include this HTLC.
	LocalRemoved(InboundHTLCRemovalReason),
}

struct InboundHTLCOutput {
	htlc_id: u64,
	amount_msat: u64,
	cltv_expiry: u32,
	payment_hash: PaymentHash,
	state: InboundHTLCState,
}
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
enum OutboundHTLCState {
	/// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we will promote to Committed (note that they may not accept it until the next time we
	/// revoke, but we don't really care about that:
	///  * they've revoked, so worst case we can announce an old state and get our (option on)
	///    money back (though we won't), and,
	///  * we'll send them a revoke when they send a commitment_signed, and since only they're
	///    allowed to remove it, the "can only be removed once committed on both sides" requirement
	///    doesn't matter to us and it's up to them to enforce it, worst-case they jump ahead but
	///    we'll never get out of sync).
	/// Note that we Box the OnionPacket as it's rather large and we don't want to blow up
	/// OutboundHTLCOutput's size just for a temporary bit
	LocalAnnounced(Box<msgs::OnionPacket>),
	Committed,
	/// Remote removed this (outbound) HTLC. We're waiting on their commitment_signed to finalize
	/// the change (though they'll need to revoke before we fail the payment).
	RemoteRemoved(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We also have not yet removed this HTLC in a commitment_signed message, and are waiting on a
	/// remote revoke_and_ack on a previous state before we can do so.
	AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We have removed this HTLC in our latest commitment_signed and are now just waiting on a
	/// revoke_and_ack to drop completely.
	AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome),
}

#[derive(Clone)]
#[cfg_attr(test, derive(Debug, PartialEq))]
enum OutboundHTLCOutcome {
	/// LDK version 0.0.105+ will always fill in the preimage here.
	Success(Option<PaymentPreimage>),
	Failure(HTLCFailReason),
}
impl From<Option<HTLCFailReason>> for OutboundHTLCOutcome {
	fn from(o: Option<HTLCFailReason>) -> Self {
		match o {
			None => OutboundHTLCOutcome::Success(None),
			Some(r) => OutboundHTLCOutcome::Failure(r)
		}
	}
}

impl<'a> Into<Option<&'a HTLCFailReason>> for &'a OutboundHTLCOutcome {
	fn into(self) -> Option<&'a HTLCFailReason> {
		match self {
			OutboundHTLCOutcome::Success(_) => None,
			OutboundHTLCOutcome::Failure(ref r) => Some(r)
		}
	}
}
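// A minimal illustration (not part of upstream LDK) of the two conversions above: a
// `None` fail-reason maps into `Success(None)`, which maps back out to no failure reason.
#[cfg(test)]
#[test]
fn example_outbound_htlc_outcome_conversions() {
	let outcome: OutboundHTLCOutcome = None.into();
	let fail_reason: Option<&HTLCFailReason> = (&outcome).into();
	assert!(fail_reason.is_none());
}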
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
struct OutboundHTLCOutput {
	htlc_id: u64,
	amount_msat: u64,
	cltv_expiry: u32,
	payment_hash: PaymentHash,
	state: OutboundHTLCState,
	source: HTLCSource,
	blinding_point: Option<PublicKey>,
	skimmed_fee_msat: Option<u64>,
}

/// See AwaitingRemoteRevoke ChannelState for more info
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
enum HTLCUpdateAwaitingACK {
	AddHTLC { // TODO: Time out if we're getting close to cltv_expiry
		// always outbound
		amount_msat: u64,
		cltv_expiry: u32,
		payment_hash: PaymentHash,
		source: HTLCSource,
		onion_routing_packet: msgs::OnionPacket,
		// The extra fee we're skimming off the top of this HTLC.
		skimmed_fee_msat: Option<u64>,
		blinding_point: Option<PublicKey>,
	},
	ClaimHTLC {
		payment_preimage: PaymentPreimage,
		htlc_id: u64,
	},
	FailHTLC {
		htlc_id: u64,
		err_packet: msgs::OnionErrorPacket,
	},
	FailMalformedHTLC {
		htlc_id: u64,
		failure_code: u16,
		sha256_of_onion: [u8; 32],
	},
}
macro_rules! define_state_flags {
	($flag_type_doc: expr, $flag_type: ident, [$(($flag_doc: expr, $flag: ident, $value: expr)),+], $extra_flags: expr) => {
		#[doc = $flag_type_doc]
		#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
		struct $flag_type(u32);

		impl $flag_type {
			$(
				#[doc = $flag_doc]
				const $flag: $flag_type = $flag_type($value);
			)*

			/// All flags that apply to the specified [`ChannelState`] variant.
			#[allow(unused)]
			const ALL: $flag_type = Self($(Self::$flag.0 | )* $extra_flags);

			#[allow(unused)]
			fn new() -> Self { Self(0) }

			#[allow(unused)]
			fn from_u32(flags: u32) -> Result<Self, ()> {
				if flags & !Self::ALL.0 != 0 {
					Err(())
				} else {
					Ok($flag_type(flags))
				}
			}

			#[allow(unused)]
			fn is_empty(&self) -> bool { self.0 == 0 }

			#[allow(unused)]
			fn is_set(&self, flag: Self) -> bool { *self & flag == flag }
		}

		impl core::ops::Not for $flag_type {
			type Output = Self;
			fn not(self) -> Self::Output { Self(!self.0) }
		}
		impl core::ops::BitOr for $flag_type {
			type Output = Self;
			fn bitor(self, rhs: Self) -> Self::Output { Self(self.0 | rhs.0) }
		}
		impl core::ops::BitOrAssign for $flag_type {
			fn bitor_assign(&mut self, rhs: Self) { self.0 |= rhs.0; }
		}
		impl core::ops::BitAnd for $flag_type {
			type Output = Self;
			fn bitand(self, rhs: Self) -> Self::Output { Self(self.0 & rhs.0) }
		}
		impl core::ops::BitAndAssign for $flag_type {
			fn bitand_assign(&mut self, rhs: Self) { self.0 &= rhs.0; }
		}
	};
	($flag_type_doc: expr, $flag_type: ident, $flags: tt) => {
		define_state_flags!($flag_type_doc, $flag_type, $flags, 0);
	};
	($flag_type_doc: expr, FUNDED_STATE, $flag_type: ident, $flags: tt) => {
		define_state_flags!($flag_type_doc, $flag_type, $flags, FundedStateFlags::ALL.0);

		impl core::ops::BitOr<FundedStateFlags> for $flag_type {
			type Output = Self;
			fn bitor(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 | rhs.0) }
		}
		impl core::ops::BitOrAssign<FundedStateFlags> for $flag_type {
			fn bitor_assign(&mut self, rhs: FundedStateFlags) { self.0 |= rhs.0; }
		}
		impl core::ops::BitAnd<FundedStateFlags> for $flag_type {
			type Output = Self;
			fn bitand(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 & rhs.0) }
		}
		impl core::ops::BitAndAssign<FundedStateFlags> for $flag_type {
			fn bitand_assign(&mut self, rhs: FundedStateFlags) { self.0 &= rhs.0; }
		}
		impl PartialEq<FundedStateFlags> for $flag_type {
			fn eq(&self, other: &FundedStateFlags) -> bool { self.0 == other.0 }
		}
		impl From<FundedStateFlags> for $flag_type {
			fn from(flags: FundedStateFlags) -> Self { Self(flags.0) }
		}
	};
}
/// We declare all the states/flags here together to help determine which bits are still available
/// to choose.
mod state_flags {
	pub const OUR_INIT_SENT: u32 = 1 << 0;
	pub const THEIR_INIT_SENT: u32 = 1 << 1;
	pub const FUNDING_NEGOTIATED: u32 = 1 << 2;
	pub const AWAITING_CHANNEL_READY: u32 = 1 << 3;
	pub const THEIR_CHANNEL_READY: u32 = 1 << 4;
	pub const OUR_CHANNEL_READY: u32 = 1 << 5;
	pub const CHANNEL_READY: u32 = 1 << 6;
	pub const PEER_DISCONNECTED: u32 = 1 << 7;
	pub const MONITOR_UPDATE_IN_PROGRESS: u32 = 1 << 8;
	pub const AWAITING_REMOTE_REVOKE: u32 = 1 << 9;
	pub const REMOTE_SHUTDOWN_SENT: u32 = 1 << 10;
	pub const LOCAL_SHUTDOWN_SENT: u32 = 1 << 11;
	pub const SHUTDOWN_COMPLETE: u32 = 1 << 12;
	pub const WAITING_FOR_BATCH: u32 = 1 << 13;
}

define_state_flags!(
	"Flags that apply to all [`ChannelState`] variants in which the channel is funded.",
	FundedStateFlags, [
		("Indicates the remote side is considered \"disconnected\" and no updates are allowed \
			until after we've done a `channel_reestablish` dance.", PEER_DISCONNECTED, state_flags::PEER_DISCONNECTED),
		("Indicates the user has told us a `ChannelMonitor` update is pending async persistence \
			somewhere and we should pause sending any outbound messages until they've managed to \
			complete it.", MONITOR_UPDATE_IN_PROGRESS, state_flags::MONITOR_UPDATE_IN_PROGRESS),
		("Indicates we received a `shutdown` message from the remote end. If set, they may not add \
			any new HTLCs to the channel, and we are expected to respond with our own `shutdown` \
			message when possible.", REMOTE_SHUTDOWN_SENT, state_flags::REMOTE_SHUTDOWN_SENT),
		("Indicates we sent a `shutdown` message. At this point, we may not add any new HTLCs to \
			the channel.", LOCAL_SHUTDOWN_SENT, state_flags::LOCAL_SHUTDOWN_SENT)
	]
);
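// A small sketch (not part of upstream LDK) of the API `define_state_flags!` generates for
// each flag type: `new`/`is_empty`/`is_set`, the bitwise operators, and `from_u32`, which
// rejects bits outside the type's `ALL` mask.
#[cfg(test)]
#[test]
fn example_funded_state_flags_basic_ops() {
	let mut flags = FundedStateFlags::new();
	assert!(flags.is_empty());
	flags |= FundedStateFlags::PEER_DISCONNECTED;
	assert!(flags.is_set(FundedStateFlags::PEER_DISCONNECTED));
	assert!(!flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS));
	// Unknown bits are rejected when parsing a serialized flag value.
	assert!(FundedStateFlags::from_u32(1 << 30).is_err());
}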
387 "Flags that only apply to [`ChannelState::NegotiatingFunding`].",
388 NegotiatingFundingFlags, [
389 ("Indicates we have (or are prepared to) send our `open_channel`/`accept_channel` message.",
390 OUR_INIT_SENT, state_flags::OUR_INIT_SENT),
391 ("Indicates we have received their `open_channel`/`accept_channel` message.",
392 THEIR_INIT_SENT, state_flags::THEIR_INIT_SENT)
397 "Flags that only apply to [`ChannelState::AwaitingChannelReady`].",
398 FUNDED_STATE, AwaitingChannelReadyFlags, [
399 ("Indicates they sent us a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
400 `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
401 THEIR_CHANNEL_READY, state_flags::THEIR_CHANNEL_READY),
402 ("Indicates we sent them a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
403 `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
404 OUR_CHANNEL_READY, state_flags::OUR_CHANNEL_READY),
405 ("Indicates the channel was funded in a batch and the broadcast of the funding transaction \
406 is being held until all channels in the batch have received `funding_signed` and have \
407 their monitors persisted.", WAITING_FOR_BATCH, state_flags::WAITING_FOR_BATCH)
412 "Flags that only apply to [`ChannelState::ChannelReady`].",
413 FUNDED_STATE, ChannelReadyFlags, [
414 ("Indicates that we have sent a `commitment_signed` but are awaiting the responding \
415 `revoke_and_ack` message. During this period, we can't generate new `commitment_signed` \
416 messages as we'd be unable to determine which HTLCs they included in their `revoke_and_ack` \
417 implicit ACK, so instead we have to hold them away temporarily to be sent later.",
418 AWAITING_REMOTE_REVOKE, state_flags::AWAITING_REMOTE_REVOKE)
422 #[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
424 /// We are negotiating the parameters required for the channel prior to funding it.
425 NegotiatingFunding(NegotiatingFundingFlags),
426 /// We have sent `funding_created` and are awaiting a `funding_signed` to advance to
427 /// `AwaitingChannelReady`. Note that this is nonsense for an inbound channel as we immediately generate
428 /// `funding_signed` upon receipt of `funding_created`, so simply skip this state.
430 /// We've received/sent `funding_created` and `funding_signed` and are thus now waiting on the
431 /// funding transaction to confirm.
432 AwaitingChannelReady(AwaitingChannelReadyFlags),
433 /// Both we and our counterparty consider the funding transaction confirmed and the channel is
435 ChannelReady(ChannelReadyFlags),
436 /// We've successfully negotiated a `closing_signed` dance. At this point, the `ChannelManager`
437 /// is about to drop us, but we store this anyway.
macro_rules! impl_state_flag {
	($get: ident, $set: ident, $clear: ident, $state_flag: expr, [$($state: ident),+]) => {
		#[allow(unused)]
		fn $get(&self) -> bool {
			match self {
				$(
					ChannelState::$state(flags) => flags.is_set($state_flag.into()),
				)*
				_ => false,
			}
		}
		#[allow(unused)]
		fn $set(&mut self) {
			match self {
				$(
					ChannelState::$state(flags) => *flags |= $state_flag,
				)*
				_ => debug_assert!(false, "Attempted to set flag on unexpected ChannelState"),
			}
		}
		#[allow(unused)]
		fn $clear(&mut self) {
			match self {
				$(
					ChannelState::$state(flags) => *flags &= !($state_flag),
				)*
				_ => debug_assert!(false, "Attempted to clear flag on unexpected ChannelState"),
			}
		}
	};
	($get: ident, $set: ident, $clear: ident, $state_flag: expr, FUNDED_STATES) => {
		impl_state_flag!($get, $set, $clear, $state_flag, [AwaitingChannelReady, ChannelReady]);
	};
	($get: ident, $set: ident, $clear: ident, $state_flag: expr, $state: ident) => {
		impl_state_flag!($get, $set, $clear, $state_flag, [$state]);
	};
}
impl ChannelState {
	fn from_u32(state: u32) -> Result<Self, ()> {
		match state {
			state_flags::FUNDING_NEGOTIATED => Ok(ChannelState::FundingNegotiated),
			state_flags::SHUTDOWN_COMPLETE => Ok(ChannelState::ShutdownComplete),
			val => {
				if val & state_flags::AWAITING_CHANNEL_READY == state_flags::AWAITING_CHANNEL_READY {
					AwaitingChannelReadyFlags::from_u32(val & !state_flags::AWAITING_CHANNEL_READY)
						.map(|flags| ChannelState::AwaitingChannelReady(flags))
				} else if val & state_flags::CHANNEL_READY == state_flags::CHANNEL_READY {
					ChannelReadyFlags::from_u32(val & !state_flags::CHANNEL_READY)
						.map(|flags| ChannelState::ChannelReady(flags))
				} else if let Ok(flags) = NegotiatingFundingFlags::from_u32(val) {
					Ok(ChannelState::NegotiatingFunding(flags))
				} else {
					Err(())
				}
			},
		}
	}

	fn to_u32(&self) -> u32 {
		match self {
			ChannelState::NegotiatingFunding(flags) => flags.0,
			ChannelState::FundingNegotiated => state_flags::FUNDING_NEGOTIATED,
			ChannelState::AwaitingChannelReady(flags) => state_flags::AWAITING_CHANNEL_READY | flags.0,
			ChannelState::ChannelReady(flags) => state_flags::CHANNEL_READY | flags.0,
			ChannelState::ShutdownComplete => state_flags::SHUTDOWN_COMPLETE,
		}
	}

	fn is_pre_funded_state(&self) -> bool {
		matches!(self, ChannelState::NegotiatingFunding(_)|ChannelState::FundingNegotiated)
	}

	fn is_both_sides_shutdown(&self) -> bool {
		self.is_local_shutdown_sent() && self.is_remote_shutdown_sent()
	}

	fn with_funded_state_flags_mask(&self) -> FundedStateFlags {
		match self {
			ChannelState::AwaitingChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
			ChannelState::ChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
			_ => FundedStateFlags::new(),
		}
	}

	fn should_force_holding_cell(&self) -> bool {
		match self {
			ChannelState::ChannelReady(flags) =>
				flags.is_set(ChannelReadyFlags::AWAITING_REMOTE_REVOKE) ||
					flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into()) ||
					flags.is_set(FundedStateFlags::PEER_DISCONNECTED.into()),
			_ => {
				debug_assert!(false, "The holding cell is only valid within ChannelReady");
				false
			},
		}
	}

	impl_state_flag!(is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected,
		FundedStateFlags::PEER_DISCONNECTED, FUNDED_STATES);
	impl_state_flag!(is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress,
		FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS, FUNDED_STATES);
	impl_state_flag!(is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent,
		FundedStateFlags::LOCAL_SHUTDOWN_SENT, FUNDED_STATES);
	impl_state_flag!(is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent,
		FundedStateFlags::REMOTE_SHUTDOWN_SENT, FUNDED_STATES);
	impl_state_flag!(is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready,
		AwaitingChannelReadyFlags::OUR_CHANNEL_READY, AwaitingChannelReady);
	impl_state_flag!(is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready,
		AwaitingChannelReadyFlags::THEIR_CHANNEL_READY, AwaitingChannelReady);
	impl_state_flag!(is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch,
		AwaitingChannelReadyFlags::WAITING_FOR_BATCH, AwaitingChannelReady);
	impl_state_flag!(is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke,
		ChannelReadyFlags::AWAITING_REMOTE_REVOKE, ChannelReady);
}
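// A minimal sketch (not part of upstream LDK) of how a `ChannelState` round-trips through
// its packed `u32` form: the variant bit (`CHANNEL_READY` here) is OR'd with any per-state
// and funded-state flags, and `from_u32` recovers the same state.
#[cfg(test)]
#[test]
fn example_channel_state_round_trip() {
	let state = ChannelState::ChannelReady(
		ChannelReadyFlags::AWAITING_REMOTE_REVOKE | FundedStateFlags::PEER_DISCONNECTED);
	let bits = state.to_u32();
	assert_eq!(bits & state_flags::CHANNEL_READY, state_flags::CHANNEL_READY);
	assert_eq!(ChannelState::from_u32(bits), Ok(state));
}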
pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;

pub const DEFAULT_MAX_HTLCS: u16 = 50;

pub(crate) fn commitment_tx_base_weight(channel_type_features: &ChannelTypeFeatures) -> u64 {
	const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
	const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124;
	if channel_type_features.supports_anchors_zero_fee_htlc_tx() { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT }
}

#[cfg(not(test))]
const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
#[cfg(test)]
pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;

pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;
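// Illustrative only (not part of upstream LDK): a worked example of how these weight
// constants feed a commitment-transaction fee estimate, fee_sat = weight * feerate_per_kw / 1000.
#[cfg(test)]
#[test]
fn example_commitment_fee_math() {
	let channel_type = ChannelTypeFeatures::only_static_remote_key();
	let feerate_per_kw: u64 = 2_500;
	// Base weight for a non-anchors channel plus two non-dust HTLC outputs.
	let weight = commitment_tx_base_weight(&channel_type) + 2 * COMMITMENT_TX_WEIGHT_PER_HTLC;
	assert_eq!(weight, 724 + 2 * 172);
	assert_eq!(weight * feerate_per_kw / 1000, 2_670);
}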
/// The percentage of the channel value `holder_max_htlc_value_in_flight_msat` used to be set to,
/// before this was made configurable. The percentage was made configurable in LDK 0.0.107,
/// although LDK 0.0.104+ enabled serialization of channels with a different value set for
/// `holder_max_htlc_value_in_flight_msat`.
pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10;
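// Illustrative only (not part of upstream LDK): with the legacy 10% default, a
// 1,000,000-sat channel caps the holder's in-flight HTLC value at 100,000,000 msat.
#[cfg(test)]
#[test]
fn example_legacy_max_in_flight() {
	let channel_value_satoshis: u64 = 1_000_000;
	let max_in_flight_msat =
		channel_value_satoshis * 1000 * MAX_IN_FLIGHT_PERCENT_LEGACY as u64 / 100;
	assert_eq!(max_in_flight_msat, 100_000_000);
}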
/// Maximum `funding_satoshis` value according to the BOLT #2 specification, if
/// `option_support_large_channel` (aka wumbo channels) is not supported.
/// It's 2^24 - 1.
pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;

/// Total bitcoin supply in satoshis.
pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 1_0000_0000;

/// The maximum network dust limit for standard script formats. This currently represents the
/// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire
/// transaction non-standard and thus refuses to relay it.
/// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many
/// implementations use this value for their dust limit today.
pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546;

/// The maximum channel dust limit we will accept from our counterparty.
pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS;

/// The dust limit is used for both the commitment transaction outputs as well as the closing
/// transactions. For cooperative closing transactions, we require segwit outputs, though accept
/// *any* segwit scripts, which are allowed to be up to 42 bytes in length.
/// In order to avoid having to concern ourselves with standardness during the closing process, we
/// simply require our counterparty to use a dust limit which will leave any segwit output
/// standard.
/// See <https://github.com/lightning/bolts/issues/905> for more details.
pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;
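// Sanity sketch (not part of upstream LDK): the floor we impose on the counterparty's dust
// limit must never exceed the ceiling we accept from them.
#[cfg(test)]
#[test]
fn example_dust_limit_bounds() {
	assert!(MIN_CHAN_DUST_LIMIT_SATOSHIS <= MAX_CHAN_DUST_LIMIT_SATOSHIS);
}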
// Just a reasonable implementation-specific safe lower bound, higher than the dust limit.
pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;

/// Used to return a simple Error back to ChannelManager. Will get converted to a
/// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
/// channel_id in ChannelManager.
pub(super) enum ChannelError {
	Ignore(String),
	Warn(String),
	Close(String),
}

impl fmt::Debug for ChannelError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match self {
			&ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
			&ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
			&ChannelError::Close(ref e) => write!(f, "Close : {}", e),
		}
	}
}

impl fmt::Display for ChannelError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match self {
			&ChannelError::Ignore(ref e) => write!(f, "{}", e),
			&ChannelError::Warn(ref e) => write!(f, "{}", e),
			&ChannelError::Close(ref e) => write!(f, "{}", e),
		}
	}
}
pub(super) struct WithChannelContext<'a, L: Deref> where L::Target: Logger {
	pub logger: &'a L,
	pub peer_id: Option<PublicKey>,
	pub channel_id: Option<ChannelId>,
}

impl<'a, L: Deref> Logger for WithChannelContext<'a, L> where L::Target: Logger {
	fn log(&self, mut record: Record) {
		record.peer_id = self.peer_id;
		record.channel_id = self.channel_id;
		self.logger.log(record)
	}
}

impl<'a, 'b, L: Deref> WithChannelContext<'a, L>
where L::Target: Logger {
	pub(super) fn from<S: Deref>(logger: &'a L, context: &'b ChannelContext<S>) -> Self
	where S::Target: SignerProvider
	{
		WithChannelContext {
			logger,
			peer_id: Some(context.counterparty_node_id),
			channel_id: Some(context.channel_id),
		}
	}
}

macro_rules! secp_check {
	($res: expr, $err: expr) => {
		match $res {
			Ok(thing) => thing,
			Err(_) => return Err(ChannelError::Close($err)),
		}
	};
}
675 /// The "channel disabled" bit in channel_update must be set based on whether we are connected to
676 /// our counterparty or not. However, we don't want to announce updates right away to avoid
677 /// spamming the network with updates if the connection is flapping. Instead, we "stage" updates to
678 /// our channel_update message and track the current state here.
679 /// See implementation at [`super::channelmanager::ChannelManager::timer_tick_occurred`].
680 #[derive(Clone, Copy, PartialEq)]
681 pub(super) enum ChannelUpdateStatus {
682 /// We've announced the channel as enabled and are connected to our peer.
684 /// Our channel is no longer live, but we haven't announced the channel as disabled yet.
686 /// Our channel is live again, but we haven't announced the channel as enabled yet.
688 /// We've announced the channel as disabled.
692 /// We track when we sent an `AnnouncementSignatures` to our peer in a few states, described here.
694 pub enum AnnouncementSigsState {
695 /// We have not sent our peer an `AnnouncementSignatures` yet, or our peer disconnected since
696 /// we sent the last `AnnouncementSignatures`.
698 /// We sent an `AnnouncementSignatures` to our peer since the last time our peer disconnected.
699 /// This state never appears on disk - instead we write `NotSent`.
701 /// We sent a `CommitmentSigned` after the last `AnnouncementSignatures` we sent. Because we
702 /// only ever have a single `CommitmentSigned` pending at once, if we sent one after sending
703 /// `AnnouncementSignatures` then we know the peer received our `AnnouncementSignatures` if
704 /// they send back a `RevokeAndACK`.
705 /// This state never appears on disk - instead we write `NotSent`.
707 /// We received a `RevokeAndACK`, effectively ack-ing our `AnnouncementSignatures`, at this
708 /// point we no longer need to re-send our `AnnouncementSignatures` again on reconnect.
/// An enum indicating whether the local or remote side offered a given HTLC.
enum HTLCInitiator {
	LocalOffered,
	RemoteOffered,
}

/// A struct gathering stats on pending HTLCs, either inbound or outbound side.
struct HTLCStats {
	pending_htlcs: u32,
	pending_htlcs_value_msat: u64,
	on_counterparty_tx_dust_exposure_msat: u64,
	on_holder_tx_dust_exposure_msat: u64,
	holding_cell_msat: u64,
	on_holder_tx_holding_cell_htlcs_count: u32, // dust HTLCs *non*-included
}

/// A struct gathering stats on a commitment transaction, either local or remote.
struct CommitmentStats<'a> {
	tx: CommitmentTransaction, // the transaction info
	feerate_per_kw: u32, // the feerate included to build the transaction
	total_fee_sat: u64, // the total fee included in the transaction
	num_nondust_htlcs: usize, // the number of HTLC outputs (dust HTLCs *non*-included)
	htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction
	local_balance_msat: u64, // local balance before fees but considering dust limits
	remote_balance_msat: u64, // remote balance before fees but considering dust limits
	outbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
	inbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful received HTLCs since last commitment
}

/// Used when calculating whether we or the remote can afford an additional HTLC.
struct HTLCCandidate {
	amount_msat: u64,
	origin: HTLCInitiator,
}

impl HTLCCandidate {
	fn new(amount_msat: u64, origin: HTLCInitiator) -> Self {
		Self {
			amount_msat,
			origin,
		}
	}
}
/// A return value enum for get_update_fulfill_htlc. See UpdateFulfillCommitFetch variants for
/// description
enum UpdateFulfillFetch {
	NewClaim {
		monitor_update: ChannelMonitorUpdate,
		htlc_value_msat: u64,
		msg: Option<msgs::UpdateFulfillHTLC>,
	},
	DuplicateClaim {},
}

/// The return type of get_update_fulfill_htlc_and_commit.
pub enum UpdateFulfillCommitFetch {
	/// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
	/// it in the holding cell, or re-generated the update_fulfill message after the same claim was
	/// previously placed in the holding cell (and has since been removed).
	NewClaim {
		/// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
		monitor_update: ChannelMonitorUpdate,
		/// The value of the HTLC which was claimed, in msat.
		htlc_value_msat: u64,
	},
	/// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
	/// or has been forgotten (presumably previously claimed).
	DuplicateClaim {},
}

/// The return value of `monitor_updating_restored`
pub(super) struct MonitorRestoreUpdates {
	pub raa: Option<msgs::RevokeAndACK>,
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub order: RAACommitmentOrder,
	pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
	pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
	pub finalized_claimed_htlcs: Vec<HTLCSource>,
	pub funding_broadcastable: Option<Transaction>,
	pub channel_ready: Option<msgs::ChannelReady>,
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
}
/// The return value of `signer_maybe_unblocked`
#[allow(unused)]
pub(super) struct SignerResumeUpdates {
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub funding_signed: Option<msgs::FundingSigned>,
	pub channel_ready: Option<msgs::ChannelReady>,
}

/// The return value of `channel_reestablish`
pub(super) struct ReestablishResponses {
	pub channel_ready: Option<msgs::ChannelReady>,
	pub raa: Option<msgs::RevokeAndACK>,
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub order: RAACommitmentOrder,
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
	pub shutdown_msg: Option<msgs::Shutdown>,
}

/// The result of a shutdown that should be handled.
#[must_use]
pub(crate) struct ShutdownResult {
	/// A channel monitor update to apply.
	pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
	/// A list of dropped outbound HTLCs that can safely be failed backwards immediately.
	pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
	/// An unbroadcasted batch funding transaction id. The closure of this channel should be
	/// propagated to the remainder of the batch.
	pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
	pub(crate) channel_id: ChannelId,
	pub(crate) counterparty_node_id: PublicKey,
}
/// If the majority of the channel's funds are to the fundee and the initiator holds only just
/// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
/// initiator controls the feerate, if they then go to increase the channel fee, they may have no
/// balance but the fundee is unable to send a payment as the increase in fee more than drains
/// their reserve value. Thus, neither side can send a new HTLC and the channel becomes useless.
/// Thus, before sending an HTLC when we are the initiator, we check that the feerate can increase
/// by this multiple without hitting this case, before sending.
/// This multiple is effectively the maximum feerate "jump" we expect until more HTLCs flow over
/// the channel. Sadly, there isn't really a good number for this - if we expect to have no new
/// HTLCs for days we may need this to suffice for feerate increases across days, but that may
/// leave the channel less usable as we hold a bigger reserve.
#[cfg(any(fuzzing, test))]
pub const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
#[cfg(not(any(fuzzing, test)))]
const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
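// Illustrative only (not part of upstream LDK): with the multiple above, an outbound HTLC
// we add must remain affordable even if the commitment feerate doubles, so we budget the
// incremental commitment fee at `feerate_per_kw * FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE`.
#[cfg(test)]
#[test]
fn example_fee_spike_buffer() {
	let channel_type = ChannelTypeFeatures::only_static_remote_key();
	let feerate_per_kw: u64 = 2_500;
	// Commitment weight with one additional non-dust HTLC: 724 + 172 = 896 weight units.
	let weight = commitment_tx_base_weight(&channel_type) + COMMITMENT_TX_WEIGHT_PER_HTLC;
	let buffered_fee_sat = feerate_per_kw * FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * weight / 1000;
	assert_eq!(buffered_fee_sat, 4_480);
}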
/// If we fail to see a funding transaction confirmed on-chain within this many blocks after the
/// channel creation on an inbound channel, we simply force-close and move on.
/// This constant is the one suggested in BOLT 2.
pub(crate) const FUNDING_CONF_DEADLINE_BLOCKS: u32 = 2016;

/// In case of a concurrent update_add_htlc proposed by our counterparty, we might
/// not have enough balance value remaining to cover the onchain cost of this new
/// HTLC weight. If this happens, our counterparty fails the reception of our
/// commitment_signed including this new HTLC due to infringement on the channel
/// reserve.
/// To prevent this case, we compute our outbound update_fee with an HTLC buffer of
/// size 2. However, if the number of concurrent update_add_htlc is higher, this still
/// leads to a channel force-close. Ultimately, this is an issue coming from the
/// design of LN state machines, allowing asynchronous updates.
pub(crate) const CONCURRENT_INBOUND_HTLC_FEE_BUFFER: u32 = 2;

/// When a channel is opened, we check that the funding amount is enough to pay for relevant
/// commitment transaction fees, with at least this many HTLCs present on the commitment
/// transaction (not counting the value of the HTLCs themselves).
pub(crate) const MIN_AFFORDABLE_HTLC_COUNT: usize = 4;
/// When a [`Channel`] has its [`ChannelConfig`] updated, its existing one is stashed for up to this
/// number of ticks to allow forwarding HTLCs by nodes that have yet to receive the new
/// ChannelUpdate prompted by the config update. This value was determined as follows:
///
/// * The expected interval between ticks (1 minute).
/// * The average convergence delay of updates across the network, i.e., ~300 seconds on average
///   for a node to see an update as seen on `<https://arxiv.org/pdf/2205.12737.pdf>`.
/// * `EXPIRE_PREV_CONFIG_TICKS` = convergence_delay / tick_interval
pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;
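// Sanity-check of the derivation described above (illustrative, not upstream code):
// ~300 seconds of convergence delay divided by 60-second ticks gives 5 ticks.
#[cfg(test)]
#[test]
fn example_expire_prev_config_ticks_derivation() {
	let convergence_delay_secs: usize = 300;
	let tick_interval_secs: usize = 60;
	assert_eq!(convergence_delay_secs / tick_interval_secs, EXPIRE_PREV_CONFIG_TICKS);
}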
/// The number of ticks that may elapse while we're waiting for a response to a
/// [`msgs::RevokeAndACK`] or [`msgs::ChannelReestablish`] message before we attempt to disconnect
/// them.
///
/// See [`ChannelContext::sent_message_awaiting_response`] for more information.
pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;

/// The number of ticks that may elapse while we're waiting for an unfunded outbound/inbound channel
/// to be promoted to a [`Channel`] since the unfunded channel was created. An unfunded channel
/// exceeding this age limit will be force-closed and purged from memory.
pub(crate) const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;

/// Number of blocks needed for an output from a coinbase transaction to be spendable.
pub(crate) const COINBASE_MATURITY: u32 = 100;

struct PendingChannelMonitorUpdate {
	update: ChannelMonitorUpdate,
}

impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
	(0, update, required),
});
/// The `ChannelPhase` enum describes the current phase in life of a lightning channel with each of
/// its variants containing an appropriate channel struct.
pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
	UnfundedOutboundV1(OutboundV1Channel<SP>),
	UnfundedInboundV1(InboundV1Channel<SP>),
	Funded(Channel<SP>),
}

impl<'a, SP: Deref> ChannelPhase<SP> where
	SP::Target: SignerProvider,
	<SP::Target as SignerProvider>::EcdsaSigner: ChannelSigner,
{
	pub fn context(&'a self) -> &'a ChannelContext<SP> {
		match self {
			ChannelPhase::Funded(chan) => &chan.context,
			ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
			ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
		}
	}

	pub fn context_mut(&'a mut self) -> &'a mut ChannelContext<SP> {
		match self {
			ChannelPhase::Funded(ref mut chan) => &mut chan.context,
			ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
			ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
		}
	}
}
/// Contains all state common to unfunded inbound/outbound channels.
pub(super) struct UnfundedChannelContext {
	/// A counter tracking how many ticks have elapsed since this unfunded channel was
	/// created. If the peer has yet to respond after this reaches
	/// `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS`, the channel will be force-closed and purged from memory.
	///
	/// This is so that we don't keep channels around that haven't progressed to a funded state
	/// in a timely manner.
	unfunded_channel_age_ticks: usize,
}

impl UnfundedChannelContext {
	/// Determines whether we should force-close and purge this unfunded channel from memory due to it
	/// having reached the unfunded channel age limit.
	///
	/// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
	pub fn should_expire_unfunded_channel(&mut self) -> bool {
		self.unfunded_channel_age_ticks += 1;
		self.unfunded_channel_age_ticks >= UNFUNDED_CHANNEL_AGE_LIMIT_TICKS
	}
}
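// Illustrative usage (not part of upstream LDK): the check above is driven by the timer,
// one call per tick, so an unfunded channel trips exactly on the age-limit tick.
#[cfg(test)]
#[test]
fn example_unfunded_channel_age_expiry() {
	let mut ctx = UnfundedChannelContext { unfunded_channel_age_ticks: 0 };
	for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS - 1 {
		assert!(!ctx.should_expire_unfunded_channel());
	}
	assert!(ctx.should_expire_unfunded_channel());
}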
/// Contains everything about the channel including state, and various flags.
pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
	config: LegacyChannelConfig,

	// Track the previous `ChannelConfig` so that we can continue forwarding HTLCs that were
	// constructed using it. The second element in the tuple corresponds to the number of ticks that
	// have elapsed since the update occurred.
	prev_config: Option<(ChannelConfig, usize)>,

	inbound_handshake_limits_override: Option<ChannelHandshakeLimits>,

	user_id: u128,

	/// The current channel ID.
	channel_id: ChannelId,
	/// The temporary channel ID used during channel setup. Value kept even after transitioning to a final channel ID.
	/// Will be `None` for channels created prior to 0.0.115.
	temporary_channel_id: Option<ChannelId>,
	channel_state: ChannelState,

	// When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
	// our peer. However, we want to make sure they received it, or else rebroadcast it when we
	// next connect.
	// We do so here, see `AnnouncementSigsSent` for more details on the state(s).
	// Note that a number of our tests were written prior to the behavior here which retransmits
	// AnnouncementSignatures until after an RAA completes, so the behavior is short-circuited in
	// tests.
	#[cfg(any(test, feature = "_test_utils"))]
	pub(crate) announcement_sigs_state: AnnouncementSigsState,
	#[cfg(not(any(test, feature = "_test_utils")))]
	announcement_sigs_state: AnnouncementSigsState,
	secp_ctx: Secp256k1<secp256k1::All>,
	channel_value_satoshis: u64,

	latest_monitor_update_id: u64,

	holder_signer: ChannelSignerType<SP>,
	shutdown_scriptpubkey: Option<ShutdownScript>,
	destination_script: ScriptBuf,

	// Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction
	// generation start at 0 and count up...this simplifies some parts of implementation at the
	// cost of others, but should really just be changed.

	cur_holder_commitment_transaction_number: u64,
	cur_counterparty_commitment_transaction_number: u64,
	value_to_self_msat: u64, // Excluding all pending_htlcs, fees, and anchor outputs
	pending_inbound_htlcs: Vec<InboundHTLCOutput>,
	pending_outbound_htlcs: Vec<OutboundHTLCOutput>,
	holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,

	/// When resending CS/RAA messages on channel monitor restoration or on reconnect, we always
	/// need to ensure we resend them in the order we originally generated them. Note that because
	/// there can only ever be one in-flight CS and/or one in-flight RAA at any time, it is
	/// sufficient to simply set this to the opposite of any message we are generating as we
	/// generate it. ie when we generate a CS, we set this to RAAFirst as, if there is a pending
	/// in-flight RAA to resend, it will have been the first thing we generated, and thus we should
	/// send it first.
	resend_order: RAACommitmentOrder,

	monitor_pending_channel_ready: bool,
	monitor_pending_revoke_and_ack: bool,
	monitor_pending_commitment_signed: bool,

	// TODO: If a channel is drop'd, we don't know whether the `ChannelMonitor` is ultimately
	// responsible for some of the HTLCs here or not - we don't know whether the update in question
	// completed or not. We currently ignore these fields entirely when force-closing a channel,
	// but need to handle this somehow or we run the risk of losing HTLCs!
	monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
	monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
	monitor_pending_finalized_fulfills: Vec<HTLCSource>,
	/// If we went to send a commitment update (ie some messages then [`msgs::CommitmentSigned`])
	/// but our signer (initially) refused to give us a signature, we should retry at some point in
	/// the future when the signer indicates it may have a signature for us.
	///
	/// This flag is set in such a case. Note that we don't need to persist this as we'll end up
	/// setting it again as a side-effect of [`Channel::channel_reestablish`].
	signer_pending_commitment_update: bool,
	/// Similar to [`Self::signer_pending_commitment_update`] but we're waiting to send either a
	/// [`msgs::FundingCreated`] or [`msgs::FundingSigned`] depending on if this channel is
	/// outbound or inbound.
	signer_pending_funding: bool,

	// pending_update_fee is filled when sending and receiving update_fee.
	//
	// Because it follows the same commitment flow as HTLCs, `FeeUpdateState` is either `Outbound`
	// or matches a subset of the `InboundHTLCState` variants. It is then updated/used when
	// generating new commitment transactions with exactly the same criteria as inbound/outbound
	// HTLCs with similar state.
	pending_update_fee: Option<(u32, FeeUpdateState)>,
	// If a `send_update_fee()` call is made with ChannelState::AwaitingRemoteRevoke set, we place
	// it here instead of `pending_update_fee` in the same way as we place outbound HTLC updates in
	// `holding_cell_htlc_updates` instead of `pending_outbound_htlcs`. It is released into
	// `pending_update_fee` with the same criteria as outbound HTLC updates but can be updated by
	// further `send_update_fee` calls, dropping the previous holding cell update entirely.
	holding_cell_update_fee: Option<u32>,
	next_holder_htlc_id: u64,
	next_counterparty_htlc_id: u64,
	feerate_per_kw: u32,

	/// The timestamp set on our latest `channel_update` message for this channel. It is updated
	/// when the channel is updated in ways which may impact the `channel_update` message or when a
	/// new block is received, ensuring it's always at least moderately close to the current real
	/// time.
	update_time_counter: u32,
	#[cfg(debug_assertions)]
	/// Max to_local and to_remote outputs in a locally-generated commitment transaction
	holder_max_commitment_tx_output: Mutex<(u64, u64)>,
	#[cfg(debug_assertions)]
	/// Max to_local and to_remote outputs in a remote-generated commitment transaction
	counterparty_max_commitment_tx_output: Mutex<(u64, u64)>,

	last_sent_closing_fee: Option<(u64, Signature)>, // (fee, holder_sig)
	target_closing_feerate_sats_per_kw: Option<u32>,

	/// If our counterparty sent us a closing_signed while we were waiting for a `ChannelMonitor`
	/// update, we need to delay processing it until later. We do that here by simply storing the
	/// closing_signed message and handling it in `maybe_propose_closing_signed`.
	pending_counterparty_closing_signed: Option<msgs::ClosingSigned>,

	/// The minimum and maximum absolute fee, in satoshis, we are willing to place on the closing
	/// transaction. These are set once we reach `closing_negotiation_ready`.
	#[cfg(test)]
	pub(crate) closing_fee_limits: Option<(u64, u64)>,
	#[cfg(not(test))]
	closing_fee_limits: Option<(u64, u64)>,

	/// If we remove an HTLC (or fee update), commit, and receive our counterparty's
	/// `revoke_and_ack`, we remove all knowledge of said HTLC (or fee update). However, the latest
	/// local commitment transaction that we can broadcast still contains the HTLC (or old fee)
	/// until we receive a further `commitment_signed`. Thus we are not eligible for initiating the
	/// `closing_signed` negotiation if we're expecting a counterparty `commitment_signed`.
	///
	/// To ensure we don't send a `closing_signed` too early, we track this state here, waiting
	/// until we see a `commitment_signed` before doing so.
	///
	/// We don't bother to persist this - we anticipate this state won't last longer than a few
	/// milliseconds, so any accidental force-closes here should be exceedingly rare.
	expecting_peer_commitment_signed: bool,

	/// The hash of the block in which the funding transaction was included.
	funding_tx_confirmed_in: Option<BlockHash>,
	funding_tx_confirmation_height: u32,
	short_channel_id: Option<u64>,
	/// Either the height at which this channel was created or the height at which it was last
	/// serialized if it was serialized by versions prior to 0.0.103.
	/// We use this to close if funding is never broadcasted.
	channel_creation_height: u32,
	counterparty_dust_limit_satoshis: u64,

	#[cfg(test)]
	pub(super) holder_dust_limit_satoshis: u64,
	#[cfg(not(test))]
	holder_dust_limit_satoshis: u64,

	#[cfg(test)]
	pub(super) counterparty_max_htlc_value_in_flight_msat: u64,
	#[cfg(not(test))]
	counterparty_max_htlc_value_in_flight_msat: u64,

	#[cfg(test)]
	pub(super) holder_max_htlc_value_in_flight_msat: u64,
	#[cfg(not(test))]
	holder_max_htlc_value_in_flight_msat: u64,

	/// minimum channel reserve for self to maintain - set by them.
	counterparty_selected_channel_reserve_satoshis: Option<u64>,

	#[cfg(test)]
	pub(super) holder_selected_channel_reserve_satoshis: u64,
	#[cfg(not(test))]
	holder_selected_channel_reserve_satoshis: u64,

	counterparty_htlc_minimum_msat: u64,
	holder_htlc_minimum_msat: u64,
	#[cfg(test)]
	pub counterparty_max_accepted_htlcs: u16,
	#[cfg(not(test))]
	counterparty_max_accepted_htlcs: u16,
	holder_max_accepted_htlcs: u16,
	minimum_depth: Option<u32>,

	counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,

	pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
	funding_transaction: Option<Transaction>,
	is_batch_funding: Option<()>,

	counterparty_cur_commitment_point: Option<PublicKey>,
	counterparty_prev_commitment_point: Option<PublicKey>,
	counterparty_node_id: PublicKey,

	counterparty_shutdown_scriptpubkey: Option<ScriptBuf>,

	commitment_secrets: CounterpartyCommitmentSecrets,

	channel_update_status: ChannelUpdateStatus,
	/// Once we reach `closing_negotiation_ready`, we set this, indicating if closing_signed does
	/// not complete within a single timer tick (one minute), we should force-close the channel.
	/// This prevents us from keeping unusable channels around forever if our counterparty wishes
	/// to avoid doing so.
	/// Note that this field is reset to false on deserialization to give us a chance to connect to
	/// our peer and start the closing_signed negotiation fresh.
	closing_signed_in_flight: bool,
	/// Our counterparty's channel_announcement signatures provided in announcement_signatures.
	/// This can be used to rebroadcast the channel_announcement message later.
	announcement_sigs: Option<(Signature, Signature)>,

	// We save these values so we can make sure `next_local_commit_tx_fee_msat` and
	// `next_remote_commit_tx_fee_msat` properly predict what the next commitment transaction fee will
	// be, by comparing the cached values to the fee of the transaction generated by
	// `build_commitment_transaction`.
	#[cfg(any(test, fuzzing))]
	next_local_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
	#[cfg(any(test, fuzzing))]
	next_remote_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,

	/// lnd has a long-standing bug where, upon reconnection, if the channel is not yet confirmed
	/// they will not send a channel_reestablish until the channel locks in. Then, they will send a
	/// channel_ready *before* sending the channel_reestablish (which is clearly a violation of
	/// the BOLT specs). We copy c-lightning's workaround here and simply store the channel_ready
	/// message until we receive a channel_reestablish.
	///
	/// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
	pub workaround_lnd_bug_4006: Option<msgs::ChannelReady>,

	/// An option set when we wish to track how many ticks have elapsed while waiting for a response
	/// from our counterparty after sending a message. If the peer has yet to respond after reaching
	/// `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`, a reconnection should be attempted to try to
	/// unblock the state machine.
	///
	/// This behavior is mostly motivated by a lnd bug in which we don't receive a message we expect
	/// to in a timely manner, which may lead to channels becoming unusable and/or force-closed. An
	/// example of such can be found at <https://github.com/lightningnetwork/lnd/issues/7682>.
	///
	/// This is currently only used when waiting for a [`msgs::ChannelReestablish`] or
	/// [`msgs::RevokeAndACK`] message from the counterparty.
	sent_message_awaiting_response: Option<usize>,
	#[cfg(any(test, fuzzing))]
	// When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
	// corresponding HTLC on the inbound path. If, then, the outbound path channel is
	// disconnected and reconnected (before we've exchanged commitment_signed and revoke_and_ack
	// messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This
	// is fine, but as a sanity check in our failure to generate the second claim, we check here
	// that the original was a claim, and that we aren't now trying to fulfill a failed HTLC.
	historical_inbound_htlc_fulfills: HashSet<u64>,

	/// This channel's type, as negotiated during channel open
	channel_type: ChannelTypeFeatures,

	// Our counterparty can offer us SCID aliases which they will map to this channel when routing
	// outbound payments. These can be used in invoice route hints to avoid explicitly revealing
	// the channel's funding UTXO.
	//
	// We also use this when sending our peer a channel_update that isn't to be broadcasted
	// publicly - allowing them to re-use their map of SCID -> channel for channel_update ->
	// associated channel mapping.
	//
	// We only bother storing the most recent SCID alias at any time, though our counterparty has
	// to store all of them.
	latest_inbound_scid_alias: Option<u64>,

	// We always offer our counterparty a static SCID alias, which we recognize as for this channel
	// if we see it in HTLC forwarding instructions. We don't bother rotating the alias given we
	// don't currently support node id aliases and eventually privacy should be provided with
	// blinded paths instead of simple scid+node_id aliases.
	outbound_scid_alias: u64,

	// We track whether we already emitted a `ChannelPending` event.
	channel_pending_event_emitted: bool,

	// We track whether we already emitted a `ChannelReady` event.
	channel_ready_event_emitted: bool,

	/// The unique identifier used to re-derive the private key material for the channel through
	/// [`SignerProvider::derive_channel_signer`].
	channel_keys_id: [u8; 32],

	/// If we can't release a [`ChannelMonitorUpdate`] until some external action completes, we
	/// store it here and only release it to the `ChannelManager` once it asks for it.
	blocked_monitor_updates: Vec<PendingChannelMonitorUpdate>,
}
impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
	/// Allowed in any state (including after shutdown)
	pub fn get_update_time_counter(&self) -> u32 {
		self.update_time_counter
	}

	pub fn get_latest_monitor_update_id(&self) -> u64 {
		self.latest_monitor_update_id
	}

	pub fn should_announce(&self) -> bool {
		self.config.announced_channel
	}

	pub fn is_outbound(&self) -> bool {
		self.channel_transaction_parameters.is_outbound_from_holder
	}

	/// Gets the fee we'd want to charge for adding an HTLC output to this Channel
	/// Allowed in any state (including after shutdown)
	pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 {
		self.config.options.forwarding_fee_base_msat
	}

	/// Returns true if we've ever received a message from the remote end for this Channel
	pub fn have_received_message(&self) -> bool {
		self.channel_state > ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT)
	}

	/// Returns true if this channel is fully established and not known to be closing.
	/// Allowed in any state (including after shutdown)
	pub fn is_usable(&self) -> bool {
		matches!(self.channel_state, ChannelState::ChannelReady(_)) &&
			!self.channel_state.is_local_shutdown_sent() &&
			!self.channel_state.is_remote_shutdown_sent() &&
			!self.monitor_pending_channel_ready
	}

	/// shutdown state returns the state of the channel in its various stages of shutdown
	pub fn shutdown_state(&self) -> ChannelShutdownState {
		match self.channel_state {
			ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_) =>
				if self.channel_state.is_local_shutdown_sent() && !self.channel_state.is_remote_shutdown_sent() {
					ChannelShutdownState::ShutdownInitiated
				} else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && !self.closing_negotiation_ready() {
					ChannelShutdownState::ResolvingHTLCs
				} else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && self.closing_negotiation_ready() {
					ChannelShutdownState::NegotiatingClosingFee
				} else {
					ChannelShutdownState::NotShuttingDown
				},
			ChannelState::ShutdownComplete => ChannelShutdownState::ShutdownComplete,
			_ => ChannelShutdownState::NotShuttingDown,
		}
	}
1294 fn closing_negotiation_ready(&self) -> bool {
1295 let is_ready_to_close = match self.channel_state {
1296 ChannelState::AwaitingChannelReady(flags) =>
1297 flags & FundedStateFlags::ALL == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
1298 ChannelState::ChannelReady(flags) =>
1299 flags == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
1302 self.pending_inbound_htlcs.is_empty() &&
1303 self.pending_outbound_htlcs.is_empty() &&
1304 self.pending_update_fee.is_none() &&
1308 /// Returns true if this channel is currently available for use. This is a stricter check
1309 /// than is_usable() as it additionally requires the peer to be currently connected.
1310 /// Allowed in any state (including after shutdown)
1311 pub fn is_live(&self) -> bool {
1312 self.is_usable() && !self.channel_state.is_peer_disconnected()
1315 // Public utilities:
1317 pub fn channel_id(&self) -> ChannelId {
1321 // Returns the `temporary_channel_id` used during channel establishment.
1323 // Will return `None` for channels created prior to LDK version 0.0.115.
1324 pub fn temporary_channel_id(&self) -> Option<ChannelId> {
1325 self.temporary_channel_id
1328 pub fn minimum_depth(&self) -> Option<u32> {
1332 /// Gets the "user_id" value passed into the construction of this channel. It has no special
1333 /// meaning and exists only to allow users to have a persistent identifier of a channel.
1334 pub fn get_user_id(&self) -> u128 {
1338 /// Gets the channel's type
1339 pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
1343 /// Gets the channel's `short_channel_id`.
1345 /// Will return `None` if the channel hasn't been confirmed yet.
1346 pub fn get_short_channel_id(&self) -> Option<u64> {
1347 self.short_channel_id
1350 /// Allowed in any state (including after shutdown)
1351 pub fn latest_inbound_scid_alias(&self) -> Option<u64> {
1352 self.latest_inbound_scid_alias
1355 /// Allowed in any state (including after shutdown)
1356 pub fn outbound_scid_alias(&self) -> u64 {
1357 self.outbound_scid_alias
1360 /// Returns the holder signer for this channel.
1362 pub fn get_signer(&self) -> &ChannelSignerType<SP> {
1363 return &self.holder_signer
1366 /// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
1367 /// indicating we were written by LDK prior to 0.0.106 (which did not set outbound SCID
1368 /// aliases), or prior to any channel actions during `Channel` initialization.
1369 pub fn set_outbound_scid_alias(&mut self, outbound_scid_alias: u64) {
1370 debug_assert_eq!(self.outbound_scid_alias, 0);
1371 self.outbound_scid_alias = outbound_scid_alias;
1374 /// Returns the funding_txo we either got from our peer, or were given by
1375 /// get_funding_created.
1376 pub fn get_funding_txo(&self) -> Option<OutPoint> {
1377 self.channel_transaction_parameters.funding_outpoint
1380 /// Returns the height in which our funding transaction was confirmed.
1381 pub fn get_funding_tx_confirmation_height(&self) -> Option<u32> {
1382 let conf_height = self.funding_tx_confirmation_height;
1383 if conf_height > 0 {
1390 /// Returns the block hash in which our funding transaction was confirmed.
1391 pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
1392 self.funding_tx_confirmed_in
1395 /// Returns the current number of confirmations on the funding transaction.
1396 pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
1397 if self.funding_tx_confirmation_height == 0 {
1398 // We either haven't seen any confirmation yet, or observed a reorg.
1402 height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1)
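// Worked example (illustrative): if the funding transaction confirmed at height 100, then
// get_funding_tx_confirmations(100) == 1 and get_funding_tx_confirmations(105) == 6, while an
// unconfirmed (or reorged-out) funding transaction yields 0 via the early return above.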
1405 fn get_holder_selected_contest_delay(&self) -> u16 {
1406 self.channel_transaction_parameters.holder_selected_contest_delay
1409 fn get_holder_pubkeys(&self) -> &ChannelPublicKeys {
1410 &self.channel_transaction_parameters.holder_pubkeys
1413 pub fn get_counterparty_selected_contest_delay(&self) -> Option<u16> {
1414 self.channel_transaction_parameters.counterparty_parameters
1415 .as_ref().map(|params| params.selected_contest_delay)
1418 fn get_counterparty_pubkeys(&self) -> &ChannelPublicKeys {
1419 &self.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys
1422 /// Allowed in any state (including after shutdown)
1423 pub fn get_counterparty_node_id(&self) -> PublicKey {
1424 self.counterparty_node_id
1427 /// Allowed in any state (including after shutdown)
1428 pub fn get_holder_htlc_minimum_msat(&self) -> u64 {
1429 self.holder_htlc_minimum_msat
1432 /// Allowed in any state (including after shutdown), but will return `None` before TheirInitSent
1433 pub fn get_holder_htlc_maximum_msat(&self) -> Option<u64> {
1434 self.get_htlc_maximum_msat(self.holder_max_htlc_value_in_flight_msat)
1437 /// Allowed in any state (including after shutdown)
1438 pub fn get_announced_htlc_max_msat(&self) -> u64 {
1440 // Upper bound by capacity. We make it a bit less than full capacity to prevent attempts
1441 // to use the full capacity. This is an effort to reduce routing failures, because in many
1442 // cases a channel might have been used to route very small values (either by honest users or as DoS).
1443 self.channel_value_satoshis * 1000 * 9 / 10,
1445 self.counterparty_max_htlc_value_in_flight_msat
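// Worked example (illustrative): for a 1_000_000 sat channel whose counterparty allows at most
// 800_000_000 msat in flight, we announce min(1_000_000 * 1000 * 9 / 10, 800_000_000)
// = 800_000_000 msat.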
1449 /// Allowed in any state (including after shutdown)
1450 pub fn get_counterparty_htlc_minimum_msat(&self) -> u64 {
1451 self.counterparty_htlc_minimum_msat
1454 /// Allowed in any state (including after shutdown), but will return `None` before TheirInitSent
1455 pub fn get_counterparty_htlc_maximum_msat(&self) -> Option<u64> {
1456 self.get_htlc_maximum_msat(self.counterparty_max_htlc_value_in_flight_msat)
1459 fn get_htlc_maximum_msat(&self, party_max_htlc_value_in_flight_msat: u64) -> Option<u64> {
1460 self.counterparty_selected_channel_reserve_satoshis.map(|counterparty_reserve| {
1461 let holder_reserve = self.holder_selected_channel_reserve_satoshis;
1463 (self.channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000,
1464 party_max_htlc_value_in_flight_msat
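// Worked example (illustrative): with a 1_000_000 sat channel and 10_000 sat reserves on each
// side, the spendable bound is (1_000_000 - 10_000 - 10_000) * 1000 = 980_000_000 msat, which
// is then capped by the given party's max-HTLC-in-flight limit.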
1469 pub fn get_value_satoshis(&self) -> u64 {
1470 self.channel_value_satoshis
1473 pub fn get_fee_proportional_millionths(&self) -> u32 {
1474 self.config.options.forwarding_fee_proportional_millionths
1477 pub fn get_cltv_expiry_delta(&self) -> u16 {
1478 cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
1481 pub fn get_max_dust_htlc_exposure_msat<F: Deref>(&self,
1482 fee_estimator: &LowerBoundedFeeEstimator<F>) -> u64
1483 where F::Target: FeeEstimator
1485 match self.config.options.max_dust_htlc_exposure {
1486 MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => {
1487 let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(
1488 ConfirmationTarget::OnChainSweep) as u64;
1489 feerate_per_kw.saturating_mul(multiplier)
1491 MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
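// Worked example (illustrative): with MaxDustHTLCExposure::FeeRateMultiplier(5_000) and an
// OnChainSweep estimate of 2_500 sat/kWU, the cap is 2_500 * 5_000 = 12_500_000 msat
// (12_500 sat) of allowed dust exposure.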
1495 /// Returns the previous [`ChannelConfig`] applied to this channel, if any.
1496 pub fn prev_config(&self) -> Option<ChannelConfig> {
1497 self.prev_config.map(|prev_config| prev_config.0)
1500 // Checks whether we should emit a `ChannelPending` event.
1501 pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
1502 self.is_funding_broadcast() && !self.channel_pending_event_emitted
1505 // Returns whether we already emitted a `ChannelPending` event.
1506 pub(crate) fn channel_pending_event_emitted(&self) -> bool {
1507 self.channel_pending_event_emitted
1510 // Remembers that we already emitted a `ChannelPending` event.
1511 pub(crate) fn set_channel_pending_event_emitted(&mut self) {
1512 self.channel_pending_event_emitted = true;
1515 // Checks whether we should emit a `ChannelReady` event.
1516 pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool {
1517 self.is_usable() && !self.channel_ready_event_emitted
1520 // Remembers that we already emitted a `ChannelReady` event.
1521 pub(crate) fn set_channel_ready_event_emitted(&mut self) {
1522 self.channel_ready_event_emitted = true;
1525 /// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once
1526 /// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will
1527 /// no longer be considered when forwarding HTLCs.
1528 pub fn maybe_expire_prev_config(&mut self) {
1529 if self.prev_config.is_none() {
1532 let prev_config = self.prev_config.as_mut().unwrap();
1534 if prev_config.1 == EXPIRE_PREV_CONFIG_TICKS {
1535 self.prev_config = None;
1539 /// Returns the current [`ChannelConfig`] applied to the channel.
1540 pub fn config(&self) -> ChannelConfig {
1544 /// Updates the channel's config. A bool is returned indicating whether the config update
1545 /// applied resulted in a new ChannelUpdate message.
1546 pub fn update_config(&mut self, config: &ChannelConfig) -> bool {
1547 let did_channel_update =
1548 self.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths ||
1549 self.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat ||
1550 self.config.options.cltv_expiry_delta != config.cltv_expiry_delta;
1551 if did_channel_update {
1552 self.prev_config = Some((self.config.options, 0));
1553 // Update the counter, which backs the ChannelUpdate timestamp, to allow the relay
1554 // policy change to propagate throughout the network.
1555 self.update_time_counter += 1;
1557 self.config.options = *config;
1561 /// Returns true if funding_signed was sent/received and the
1562 /// funding transaction has been broadcast if necessary.
1563 pub fn is_funding_broadcast(&self) -> bool {
1564 !self.channel_state.is_pre_funded_state() &&
1565 !matches!(self.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH))
1568 /// Transaction nomenclature is somewhat confusing here as there are many different cases - a
1569 /// transaction is referred to as "a's transaction" implying that a will be able to broadcast
1570 /// the transaction. Thus, b will generally be sending a signature over such a transaction to
1571 /// a, and a can revoke the transaction by providing b the relevant per_commitment_secret. As
1572 /// such, a transaction is generally the result of b increasing the amount paid to a (or adding
1573 /// an HTLC to it).
1574 /// @local is used only to convert relevant internal structures which refer to remote vs local
1575 /// to decide the value of outputs and the direction of HTLCs.
1576 /// @generated_by_local is used to determine *which* HTLCs to include - noting that the HTLC
1577 /// state may indicate that one peer has informed the other that they'd like to add an HTLC but
1578 /// have not yet committed it. Such HTLCs will only be included in transactions which are being
1579 /// generated by the peer which proposed adding the HTLCs, and thus we need to understand both
1580 /// which peer generated this transaction and "to whom" this transaction flows.
1582 fn build_commitment_transaction<L: Deref>(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> CommitmentStats
1583 where L::Target: Logger
1585 let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new();
1586 let num_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len();
1587 let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs);
1589 let broadcaster_dust_limit_satoshis = if local { self.holder_dust_limit_satoshis } else { self.counterparty_dust_limit_satoshis };
1590 let mut remote_htlc_total_msat = 0;
1591 let mut local_htlc_total_msat = 0;
1592 let mut value_to_self_msat_offset = 0;
1594 let mut feerate_per_kw = self.feerate_per_kw;
1595 if let Some((feerate, update_state)) = self.pending_update_fee {
1596 if match update_state {
1597 // Note that these match the inclusion criteria when scanning
1598 // pending_inbound_htlcs below.
1599 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); !generated_by_local },
1600 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); !generated_by_local },
1601 FeeUpdateState::Outbound => { assert!(self.is_outbound()); generated_by_local },
1603 feerate_per_kw = feerate;
1607 log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...",
1608 commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number),
1609 get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()),
1611 if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
1613 macro_rules! get_htlc_in_commitment {
1614 ($htlc: expr, $offered: expr) => {
1615 HTLCOutputInCommitment {
1617 amount_msat: $htlc.amount_msat,
1618 cltv_expiry: $htlc.cltv_expiry,
1619 payment_hash: $htlc.payment_hash,
1620 transaction_output_index: None
1625 macro_rules! add_htlc_output {
1626 ($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => {
1627 if $outbound == local { // "offered HTLC output"
1628 let htlc_in_tx = get_htlc_in_commitment!($htlc, true);
1629 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1632 feerate_per_kw as u64 * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
1634 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1635 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1636 included_non_dust_htlcs.push((htlc_in_tx, $source));
1638 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1639 included_dust_htlcs.push((htlc_in_tx, $source));
1642 let htlc_in_tx = get_htlc_in_commitment!($htlc, false);
1643 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1646 feerate_per_kw as u64 * htlc_success_tx_weight(self.get_channel_type()) / 1000
1648 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1649 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1650 included_non_dust_htlcs.push((htlc_in_tx, $source));
1652 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1653 included_dust_htlcs.push((htlc_in_tx, $source));
1659 let mut inbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
1661 for ref htlc in self.pending_inbound_htlcs.iter() {
1662 let (include, state_name) = match htlc.state {
1663 InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"),
1664 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => (!generated_by_local, "AwaitingRemoteRevokeToAnnounce"),
1665 InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => (true, "AwaitingAnnouncedRemoteRevoke"),
1666 InboundHTLCState::Committed => (true, "Committed"),
1667 InboundHTLCState::LocalRemoved(_) => (!generated_by_local, "LocalRemoved"),
1671 add_htlc_output!(htlc, false, None, state_name);
1672 remote_htlc_total_msat += htlc.amount_msat;
1674 log_trace!(logger, " ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1676 &InboundHTLCState::LocalRemoved(ref reason) => {
1677 if generated_by_local {
1678 if let &InboundHTLCRemovalReason::Fulfill(preimage) = reason {
1679 inbound_htlc_preimages.push(preimage);
1680 value_to_self_msat_offset += htlc.amount_msat as i64;
1690 let mut outbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
1692 for ref htlc in self.pending_outbound_htlcs.iter() {
1693 let (include, state_name) = match htlc.state {
1694 OutboundHTLCState::LocalAnnounced(_) => (generated_by_local, "LocalAnnounced"),
1695 OutboundHTLCState::Committed => (true, "Committed"),
1696 OutboundHTLCState::RemoteRemoved(_) => (generated_by_local, "RemoteRemoved"),
1697 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) => (generated_by_local, "AwaitingRemoteRevokeToRemove"),
1698 OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) => (false, "AwaitingRemovedRemoteRevoke"),
1701 let preimage_opt = match htlc.state {
1702 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(p)) => p,
1703 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(p)) => p,
1704 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(p)) => p,
1708 if let Some(preimage) = preimage_opt {
1709 outbound_htlc_preimages.push(preimage);
1713 add_htlc_output!(htlc, true, Some(&htlc.source), state_name);
1714 local_htlc_total_msat += htlc.amount_msat;
1716 log_trace!(logger, " ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1718 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_))|OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => {
1719 value_to_self_msat_offset -= htlc.amount_msat as i64;
1721 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(_)) => {
1722 if !generated_by_local {
1723 value_to_self_msat_offset -= htlc.amount_msat as i64;
1731 let mut value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
1732 assert!(value_to_self_msat >= 0);
1733 // Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie
1734 // AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
1735 // "violate" their reserve value by couting those against it. Thus, we have to convert
1736 // everything to i64 before subtracting as otherwise we can overflow.
1737 let mut value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
1738 assert!(value_to_remote_msat >= 0);
1740 #[cfg(debug_assertions)]
1742 // Make sure that the to_self/to_remote is always either past the appropriate
1743 // channel_reserve *or* it is making progress towards it.
1744 let mut broadcaster_max_commitment_tx_output = if generated_by_local {
1745 self.holder_max_commitment_tx_output.lock().unwrap()
1747 self.counterparty_max_commitment_tx_output.lock().unwrap()
1749 debug_assert!(broadcaster_max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= self.counterparty_selected_channel_reserve_satoshis.unwrap() as i64);
1750 broadcaster_max_commitment_tx_output.0 = cmp::max(broadcaster_max_commitment_tx_output.0, value_to_self_msat as u64);
1751 debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= self.holder_selected_channel_reserve_satoshis as i64);
1752 broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64);
1755 let total_fee_sat = commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), &self.channel_transaction_parameters.channel_type_features);
1756 let anchors_val = if self.channel_transaction_parameters.channel_type_features.supports_anchors_zero_fee_htlc_tx() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64;
1757 let (value_to_self, value_to_remote) = if self.is_outbound() {
1758 (value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000)
1760 (value_to_self_msat / 1000, value_to_remote_msat / 1000 - anchors_val - total_fee_sat as i64)
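// Worked example (illustrative, assuming the 330-sat ANCHOR_OUTPUT_VALUE_SATOSHI): on an
// anchors channel the funder additionally foots 2 * 330 = 660 sat for the anchor outputs, so
// an outbound funder with value_to_self_msat = 500_000_000 and a 1_100 sat commitment fee is
// left with 500_000 - 660 - 1_100 = 498_240 sat on their side of this commitment.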
1763 let mut value_to_a = if local { value_to_self } else { value_to_remote };
1764 let mut value_to_b = if local { value_to_remote } else { value_to_self };
1765 let (funding_pubkey_a, funding_pubkey_b) = if local {
1766 (self.get_holder_pubkeys().funding_pubkey, self.get_counterparty_pubkeys().funding_pubkey)
1768 (self.get_counterparty_pubkeys().funding_pubkey, self.get_holder_pubkeys().funding_pubkey)
1771 if value_to_a >= (broadcaster_dust_limit_satoshis as i64) {
1772 log_trace!(logger, " ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a);
1777 if value_to_b >= (broadcaster_dust_limit_satoshis as i64) {
1778 log_trace!(logger, " ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b);
1783 let num_nondust_htlcs = included_non_dust_htlcs.len();
1785 let channel_parameters =
1786 if local { self.channel_transaction_parameters.as_holder_broadcastable() }
1787 else { self.channel_transaction_parameters.as_counterparty_broadcastable() };
1788 let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number,
1795 &mut included_non_dust_htlcs,
1798 let mut htlcs_included = included_non_dust_htlcs;
1799 // The unwrap is safe, because all non-dust HTLCs have been assigned an output index
1800 htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
1801 htlcs_included.append(&mut included_dust_htlcs);
1803 // For the stats, trim the below-dust msat values to 0 accordingly.
1804 value_to_self_msat = if (value_to_self_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_self_msat };
1805 value_to_remote_msat = if (value_to_remote_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_remote_msat };
1813 local_balance_msat: value_to_self_msat as u64,
1814 remote_balance_msat: value_to_remote_msat as u64,
1815 inbound_htlc_preimages,
1816 outbound_htlc_preimages,
1821 /// Creates a set of keys for build_commitment_transaction to generate a transaction which our
1822 /// counterparty will sign (ie DO NOT send signatures over a transaction created by this to
1823 /// our counterparty!)
1824 /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
1825 /// TODO: find some way to enforce this distinction at compile time?
1826 fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
1827 let per_commitment_point = self.holder_signer.as_ref().get_per_commitment_point(commitment_number, &self.secp_ctx);
1828 let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
1829 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1830 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1832 TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint)
1836 /// Creates a set of keys for build_commitment_transaction to generate a transaction which we
1837 /// will sign and send to our counterparty.
1838 /// If an Err is returned, it is a ChannelError::Close (for get_funding_created)
1839 fn build_remote_transaction_keys(&self) -> TxCreationKeys {
1840 //TODO: Ensure that the payment_key derived here ends up in the library users' wallet as we
1841 //may see payments to it!
1842 let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
1843 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1844 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1846 TxCreationKeys::derive_new(&self.secp_ctx, &self.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint)
1849 /// Gets the redeemscript for the funding transaction output (ie the funding transaction output
1850 /// pays to get_funding_redeemscript().to_v0_p2wsh()).
1851 /// Panics if called before accept_channel/InboundV1Channel::new
1852 pub fn get_funding_redeemscript(&self) -> ScriptBuf {
1853 make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
1856 fn counterparty_funding_pubkey(&self) -> &PublicKey {
1857 &self.get_counterparty_pubkeys().funding_pubkey
1860 pub fn get_feerate_sat_per_1000_weight(&self) -> u32 {
1864 pub fn get_dust_buffer_feerate(&self, outbound_feerate_update: Option<u32>) -> u32 {
1865 // When calculating our exposure to dust HTLCs, we assume that the channel feerate
1866 // may, at any point, increase by at least 10 sat/vB (i.e 2530 sat/kWU) or 25%,
1867 // whichever is higher. This ensures that we aren't suddenly exposed to significantly
1868 // more dust balance if the feerate increases when we have several HTLCs pending
1869 // which are near the dust limit.
1870 let mut feerate_per_kw = self.feerate_per_kw;
1871 // If there's a pending update fee, use it to ensure we aren't under-estimating
1872 // potential feerate updates coming soon.
1873 if let Some((feerate, _)) = self.pending_update_fee {
1874 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1876 if let Some(feerate) = outbound_feerate_update {
1877 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1879 cmp::max(2530, feerate_per_kw * 1250 / 1000)
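// Worked example (illustrative): at a current feerate of 10_000 sat/kWU the buffer feerate is
// max(2530, 10_000 * 1250 / 1000) = 12_500 sat/kWU (the +25% branch), while at 1_000 sat/kWU
// the absolute floor dominates: max(2530, 1_250) = 2530 sat/kWU.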
1882 /// Get forwarding information for the counterparty.
1883 pub fn counterparty_forwarding_info(&self) -> Option<CounterpartyForwardingInfo> {
1884 self.counterparty_forwarding_info.clone()
1887 /// Returns an HTLCStats over the pending inbound HTLCs.
1888 fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1890 let mut stats = HTLCStats {
1891 pending_htlcs: context.pending_inbound_htlcs.len() as u32,
1892 pending_htlcs_value_msat: 0,
1893 on_counterparty_tx_dust_exposure_msat: 0,
1894 on_holder_tx_dust_exposure_msat: 0,
1895 holding_cell_msat: 0,
1896 on_holder_tx_holding_cell_htlcs_count: 0,
1899 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1902 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1903 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1904 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1906 let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
1907 let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
1908 for ref htlc in context.pending_inbound_htlcs.iter() {
1909 stats.pending_htlcs_value_msat += htlc.amount_msat;
1910 if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
1911 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1913 if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
1914 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1920 /// Returns an HTLCStats over the pending outbound HTLCs, *including* pending adds in our holding cell.
1921 fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1923 let mut stats = HTLCStats {
1924 pending_htlcs: context.pending_outbound_htlcs.len() as u32,
1925 pending_htlcs_value_msat: 0,
1926 on_counterparty_tx_dust_exposure_msat: 0,
1927 on_holder_tx_dust_exposure_msat: 0,
1928 holding_cell_msat: 0,
1929 on_holder_tx_holding_cell_htlcs_count: 0,
1932 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1935 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1936 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1937 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1939 let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
1940 let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
1941 for ref htlc in context.pending_outbound_htlcs.iter() {
1942 stats.pending_htlcs_value_msat += htlc.amount_msat;
1943 if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
1944 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1946 if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
1947 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1951 for update in context.holding_cell_htlc_updates.iter() {
1952 if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
1953 stats.pending_htlcs += 1;
1954 stats.pending_htlcs_value_msat += amount_msat;
1955 stats.holding_cell_msat += amount_msat;
1956 if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
1957 stats.on_counterparty_tx_dust_exposure_msat += amount_msat;
1959 if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
1960 stats.on_holder_tx_dust_exposure_msat += amount_msat;
1962 stats.on_holder_tx_holding_cell_htlcs_count += 1;
1969 /// Get the available balances, see [`AvailableBalances`]'s fields for more info.
1970 /// Doesn't bother handling the
1971 /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
1972 /// corner case properly.
1973 pub fn get_available_balances<F: Deref>(&self, fee_estimator: &LowerBoundedFeeEstimator<F>)
1974 -> AvailableBalances
1975 where F::Target: FeeEstimator
1977 let context = &self;
1978 // Note that we have to handle overflow due to the above case.
1979 let inbound_stats = context.get_inbound_pending_htlc_stats(None);
1980 let outbound_stats = context.get_outbound_pending_htlc_stats(None);
1982 let mut balance_msat = context.value_to_self_msat;
1983 for ref htlc in context.pending_inbound_htlcs.iter() {
1984 if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
1985 balance_msat += htlc.amount_msat;
1988 balance_msat -= outbound_stats.pending_htlcs_value_msat;
1990 let outbound_capacity_msat = context.value_to_self_msat
1991 .saturating_sub(outbound_stats.pending_htlcs_value_msat)
1993 context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);
1995 let mut available_capacity_msat = outbound_capacity_msat;
1997 let anchor_outputs_value_msat = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1998 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
2002 if context.is_outbound() {
2003 // We should mind channel commit tx fee when computing how much of the available capacity
2004 // can be used in the next htlc. Mirrors the logic in send_htlc.
2006 // The fee depends on whether the amount we will be sending is above dust or not,
2007 // and the answer will in turn change the amount itself, making it a circular
2008 // dependency.
2009 // This complicates the computation around dust-values, up to the one-htlc-value.
2010 let mut real_dust_limit_timeout_sat = context.holder_dust_limit_satoshis;
2011 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2012 real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000;
2015 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
2016 let mut max_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
2017 let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered);
2018 let mut min_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_dust, Some(()));
2019 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2020 max_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
2021 min_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
2024 // We will first subtract the fee as if we were above-dust. Then, if the resulting
2025 // value ends up being below dust, we have this fee available again. In that case,
2026 // match the value to right-below-dust.
2027 let mut capacity_minus_commitment_fee_msat: i64 = available_capacity_msat as i64 -
2028 max_reserved_commit_tx_fee_msat as i64 - anchor_outputs_value_msat as i64;
2029 if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 {
2030 let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
2031 debug_assert!(one_htlc_difference_msat != 0);
2032 capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64;
2033 capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat);
2034 available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64;
2036 available_capacity_msat = capacity_minus_commitment_fee_msat as u64;
2039 // If the channel is inbound (i.e. counterparty pays the fee), we need to make sure
2040 // sending a new HTLC won't reduce their balance below our reserve threshold.
2041 let mut real_dust_limit_success_sat = context.counterparty_dust_limit_satoshis;
2042 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2043 real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000;
2046 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered);
2047 let max_reserved_commit_tx_fee_msat = context.next_remote_commit_tx_fee_msat(htlc_above_dust, None);
2049 let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000;
2050 let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat)
2051 .saturating_sub(inbound_stats.pending_htlcs_value_msat);
2053 if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat + anchor_outputs_value_msat {
2054 // If another HTLC's fee would reduce the remote's balance below the reserve limit
2055 // we've selected for them, we can only send dust HTLCs.
2056 available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1);
2060 let mut next_outbound_htlc_minimum_msat = context.counterparty_htlc_minimum_msat;
2062 // If we get close to our maximum dust exposure, we end up in a situation where we can send
2063 // between zero and the remaining dust exposure limit, OR above the dust limit.
2064 // Because we cannot express this as a simple min/max, we prefer to tell the user they can
2065 // send above the dust limit (as the router can always overpay to meet the dust limit).
2066 let mut remaining_msat_below_dust_exposure_limit = None;
2067 let mut dust_exposure_dust_limit_msat = 0;
2068 let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(fee_estimator);
2070 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2071 (context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
2073 let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64;
2074 (context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2075 context.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2077 let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
2078 if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
2079 remaining_msat_below_dust_exposure_limit =
2080 Some(max_dust_htlc_exposure_msat.saturating_sub(on_counterparty_dust_htlc_exposure_msat));
2081 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
2084 let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
2085 if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
2086 remaining_msat_below_dust_exposure_limit = Some(cmp::min(
2087 remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
2088 max_dust_htlc_exposure_msat.saturating_sub(on_holder_dust_htlc_exposure_msat)));
2089 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
2092 if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit {
2093 if available_capacity_msat < dust_exposure_dust_limit_msat {
2094 available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat);
2096 next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat);
2100 available_capacity_msat = cmp::min(available_capacity_msat,
2101 context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat);
2103 if outbound_stats.pending_htlcs + 1 > context.counterparty_max_accepted_htlcs as u32 {
2104 available_capacity_msat = 0;
2108 inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000
2109 - context.value_to_self_msat as i64
2110 - context.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
2111 - context.holder_selected_channel_reserve_satoshis as i64 * 1000,
2113 outbound_capacity_msat,
2114 next_outbound_htlc_limit_msat: available_capacity_msat,
2115 next_outbound_htlc_minimum_msat,
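// Worked example for the inbound side (illustrative): in a 1_000_000 sat channel where we hold
// 600_000_000 msat, have no pending inbound HTLCs, and require a 10_000 sat reserve of our
// counterparty, inbound_capacity_msat = 1_000_000_000 - 600_000_000 - 10_000_000
// = 390_000_000 msat.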
2120 pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
2121 let context = &self;
2122 (context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis)
2125 /// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the
2126 /// number of pending HTLCs that are on track to be in our next commitment tx.
2128 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
2129 /// `fee_spike_buffer_htlc` is `Some`.
2131 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
2132 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
2134 /// Dust HTLCs are excluded.
2135 fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
2136 let context = &self;
2137 assert!(context.is_outbound());
2139 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2142 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2143 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2145 let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
2146 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
2148 let mut addl_htlcs = 0;
2149 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
2151 HTLCInitiator::LocalOffered => {
2152 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
2156 HTLCInitiator::RemoteOffered => {
2157 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
2163 let mut included_htlcs = 0;
2164 for ref htlc in context.pending_inbound_htlcs.iter() {
2165 if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
2168 // We include LocalRemoved HTLCs here because we may still need to broadcast a commitment
2169 // transaction including this HTLC if it times out before they RAA.
2170 included_htlcs += 1;
2173 for ref htlc in context.pending_outbound_htlcs.iter() {
2174 if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
2178 OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1,
2179 OutboundHTLCState::Committed => included_htlcs += 1,
2180 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
2181 // We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment
2182 // transaction won't be generated until they send us their next RAA, which will mean
2183 // dropping any HTLCs in this state.
2188 for htlc in context.holding_cell_htlc_updates.iter() {
2190 &HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => {
2191 if amount_msat / 1000 < real_dust_limit_timeout_sat {
2196 _ => {}, // Don't include claims/fails that are awaiting ack, because once we get the
2197 // ack we're guaranteed to never include them in commitment txs anymore.
2201 let num_htlcs = included_htlcs + addl_htlcs;
2202 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
2203 #[cfg(any(test, fuzzing))]
2206 if fee_spike_buffer_htlc.is_some() {
2207 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
2209 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len()
2210 + context.holding_cell_htlc_updates.len();
2211 let commitment_tx_info = CommitmentTxInfoCached {
2213 total_pending_htlcs,
2214 next_holder_htlc_id: match htlc.origin {
2215 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
2216 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
2218 next_counterparty_htlc_id: match htlc.origin {
2219 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
2220 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
2222 feerate: context.feerate_per_kw,
2224 *context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
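// Worked example (illustrative, non-anchors, assuming the usual 724-weight base and 172 weight
// per HTLC): with feerate_per_kw = 1_000, two committed non-dust HTLCs and a fee spike buffer
// HTLC, num_htlcs = 3 and the reserved fee is (724 + 3 * 172) * 1_000 / 1000 * 1000
// = 1_240_000 msat.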
2229 /// Get the commitment tx fee for the remote's next commitment transaction based on the number of
2230 /// pending HTLCs that are on track to be in their next commitment tx
2232 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
2233 /// `fee_spike_buffer_htlc` is `Some`.
2235 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
2236 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
2238 /// Dust HTLCs are excluded.
2239 fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
2240 let context = &self;
2241 assert!(!context.is_outbound());
2243 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2246 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2247 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2249 let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
2250 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
2252 let mut addl_htlcs = 0;
2253 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
2255 HTLCInitiator::LocalOffered => {
2256 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
2260 HTLCInitiator::RemoteOffered => {
2261 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
2267 // When calculating the set of HTLCs which will be included in their next commitment_signed, all
2268 // non-dust inbound HTLCs are included (as all states imply it will be included) and only
2269 // committed outbound HTLCs, see below.
2270 let mut included_htlcs = 0;
2271 for ref htlc in context.pending_inbound_htlcs.iter() {
2272 if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat {
2275 included_htlcs += 1;
2278 for ref htlc in context.pending_outbound_htlcs.iter() {
2279 if htlc.amount_msat / 1000 <= real_dust_limit_success_sat {
2282 // We only include outbound HTLCs if they will still be included in their next commitment_signed,
2283 // i.e. we skip HTLCs whose removal merely awaits their RAA after announcement.
2285 OutboundHTLCState::Committed => included_htlcs += 1,
2286 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
2287 OutboundHTLCState::LocalAnnounced { .. } => included_htlcs += 1,
2292 let num_htlcs = included_htlcs + addl_htlcs;
2293 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
2294 #[cfg(any(test, fuzzing))]
2297 if fee_spike_buffer_htlc.is_some() {
2298 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
2300 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len();
2301 let commitment_tx_info = CommitmentTxInfoCached {
2303 total_pending_htlcs,
2304 next_holder_htlc_id: match htlc.origin {
2305 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
2306 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
2308 next_counterparty_htlc_id: match htlc.origin {
2309 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
2310 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
2312 feerate: context.feerate_per_kw,
2314 *context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
2319 fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O>
2320 where F: Fn() -> Option<O> {
2321 match self.channel_state {
2322 ChannelState::FundingNegotiated => f(),
2323 ChannelState::AwaitingChannelReady(flags) => if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) {
2332 /// Returns the transaction if there is a pending funding transaction that is yet to be
2334 pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
2335 self.if_unbroadcasted_funding(|| self.funding_transaction.clone())
2338 /// Returns the transaction ID if there is a pending funding transaction that is yet to be
2340 pub fn unbroadcasted_funding_txid(&self) -> Option<Txid> {
2341 self.if_unbroadcasted_funding(||
2342 self.channel_transaction_parameters.funding_outpoint.map(|txo| txo.txid)
2346 /// Returns whether the channel is funded in a batch.
2347 pub fn is_batch_funding(&self) -> bool {
2348 self.is_batch_funding.is_some()
2351 /// Returns the transaction ID if there is a pending batch funding transaction that is yet to be
2353 pub fn unbroadcasted_batch_funding_txid(&self) -> Option<Txid> {
2354 self.unbroadcasted_funding_txid().filter(|_| self.is_batch_funding())
2357 /// Gets the latest commitment transaction and any dependent transactions for relay (forcing
2358 /// shutdown of this channel - no more calls into this Channel may be made afterwards except
2359 /// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
2360 /// Also returns the list of payment_hashes for HTLCs which we can safely fail backwards
2361 /// immediately (others we will have to allow to time out).
2362 pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult {
2363 // Note that we MUST only generate a monitor update that indicates force-closure - we're
2364 // called during initialization prior to the chain_monitor in the encompassing ChannelManager
2365 // being fully configured in some cases. Thus, it's likely any monitor events we generate will
2366 // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
2367 assert!(!matches!(self.channel_state, ChannelState::ShutdownComplete));
2369 // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
2370 // return them to fail the payment.
2371 let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
2372 let counterparty_node_id = self.get_counterparty_node_id();
2373 for htlc_update in self.holding_cell_htlc_updates.drain(..) {
2375 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
2376 dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
2381 let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
2382 // If we haven't yet exchanged funding signatures (ie channel_state < AwaitingChannelReady),
2383 // returning a channel monitor update here would imply a channel monitor update before
2384 // we even registered the channel monitor to begin with, which is invalid.
2385 // Thus, if we aren't actually at a point where we could conceivably broadcast the
2386 // funding transaction, don't return a funding txo (which prevents providing the
2387 // monitor update to the user, even if we return one).
2388 // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
2389 let generate_monitor_update = match self.channel_state {
2390 ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)|ChannelState::ShutdownComplete => true,
2393 if generate_monitor_update {
2394 self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
2395 Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
2396 update_id: self.latest_monitor_update_id,
2397 updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
2401 let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();
2403 self.channel_state = ChannelState::ShutdownComplete;
2404 self.update_time_counter += 1;
2407 dropped_outbound_htlcs,
2408 unbroadcasted_batch_funding_txid,
2409 channel_id: self.channel_id,
2410 counterparty_node_id: self.counterparty_node_id,
2414 /// Only allowed after [`Self::channel_transaction_parameters`] is set.
2415 fn get_funding_signed_msg<L: Deref>(&mut self, logger: &L) -> (CommitmentTransaction, Option<msgs::FundingSigned>) where L::Target: Logger {
2416 let counterparty_keys = self.build_remote_transaction_keys();
2417 let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number + 1, &counterparty_keys, false, false, logger).tx;
2419 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
2420 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
2421 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
2422 &self.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
2424 match &self.holder_signer {
2425 // TODO (arik): move match into calling method for Taproot
2426 ChannelSignerType::Ecdsa(ecdsa) => {
2427 let funding_signed = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.secp_ctx)
2428 .map(|(signature, _)| msgs::FundingSigned {
2429 channel_id: self.channel_id(),
2432 partial_signature_with_nonce: None,
2436 if funding_signed.is_none() {
2437 log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
2438 self.signer_pending_funding = true;
2439 } else if self.signer_pending_funding {
2440 log_trace!(logger, "Counterparty commitment signature available for funding_signed message; clearing signer_pending_funding");
2441 self.signer_pending_funding = false;
2444 // We sign "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
2445 (counterparty_initial_commitment_tx, funding_signed)
2447 // TODO (taproot|arik)
2454 // Internal utility functions for channels
2456 /// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
2457 /// `channel_value_satoshis` in msat, set through
2458 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
2460 /// The effective percentage is lower bounded by 1% and upper bounded by 100%.
2462 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
2463 fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
2464 let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
2466 } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
2469 config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
2471 channel_value_satoshis * 10 * configured_percent
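// Worked example (illustrative): for a 1_000_000 sat channel configured at 10%, this returns
// 1_000_000 * 10 * 10 = 100_000_000 msat, i.e. 10% of the channel value expressed in msat.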
2474 /// Returns a minimum channel reserve value the remote needs to maintain,
2475 /// required by us according to the configured or default
2476 /// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
2478 /// Guaranteed to return a value no larger than channel_value_satoshis
2480 /// This is used for both outbound and inbound channels and has a lower bound
2481 /// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
2482 pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
2483 let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
2484 cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
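// Worked example (illustrative, assuming the 1_000 sat MIN_THEIR_CHAN_RESERVE_SATOSHIS): at
// their_channel_reserve_proportional_millionths = 10_000 (i.e. 1%), a 1_000_000 sat channel
// yields max(10_000, 1_000) = 10_000 sat, while a 50_000 sat channel is floored at 1_000 sat;
// the outer min() guarantees we never exceed the channel value itself.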
2487 /// This exists for legacy reasons and is present for forward-compatibility.
2488 /// LDK versions older than 0.0.104 don't know how to read/handle values other than the
2489 /// default from storage. Hence, we use this function to avoid persisting default values of
2490 /// `holder_selected_channel_reserve_satoshis` for channels into storage.
2491 pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
2492 let (q, _) = channel_value_satoshis.overflowing_div(100);
2493 cmp::min(channel_value_satoshis, cmp::max(q, 1000))
2496 // Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
2497 // Note that num_htlcs should not include dust HTLCs.
2499 fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2500 feerate_per_kw as u64 * (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
2503 // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs.
2504 // Note that num_htlcs should not include dust HTLCs.
2505 pub(crate) fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2506 // Note that we need to divide before multiplying to round properly,
2507 // since the lowest denomination of bitcoin on-chain is the satoshi.
2508 (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
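// A minimal sanity-check of the rounding above (editor's illustrative sketch, assuming the
// usual 724-weight non-anchors base from `commitment_tx_base_weight` and the 172-weight
// `COMMITMENT_TX_WEIGHT_PER_HTLC`):
#[cfg(test)]
mod commit_tx_fee_rounding_example {
	use super::*;

	#[test]
	fn commit_tx_fee_msat_rounds_down_to_whole_satoshis() {
		let features = ChannelTypeFeatures::only_static_remote_key();
		// weight = 724 + 2 * 172 = 1068; 1068 * 253 / 1000 truncates to 270 sat, i.e. 270_000
		// msat: the sub-satoshi remainder is dropped before scaling back up to msat.
		assert_eq!(commit_tx_fee_msat(253, 2, &features), 270_000);
	}
}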
2511 // Holder designates channel data owned by us, for the benefit of the user client.
2512 // Counterparty designates channel data owned by the other channel participant.
2513 pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
2514 pub context: ChannelContext<SP>,
2517 #[cfg(any(test, fuzzing))]
2518 struct CommitmentTxInfoCached {
2520 total_pending_htlcs: usize,
2521 next_holder_htlc_id: u64,
2522 next_counterparty_htlc_id: u64,
/// Contents of a wire message that fails an HTLC backwards. Useful for [`Channel::fail_htlc`] to
/// fail with either [`msgs::UpdateFailMalformedHTLC`] or [`msgs::UpdateFailHTLC`] as needed.
trait FailHTLCContents {
	type Message: FailHTLCMessageName;
	fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message;
	fn to_inbound_htlc_state(self) -> InboundHTLCState;
	fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK;
}
impl FailHTLCContents for msgs::OnionErrorPacket {
	type Message = msgs::UpdateFailHTLC;
	fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
		msgs::UpdateFailHTLC { htlc_id, channel_id, reason: self }
	}
	fn to_inbound_htlc_state(self) -> InboundHTLCState {
		InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(self))
	}
	fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
		HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet: self }
	}
}
impl FailHTLCContents for (u16, [u8; 32]) {
	type Message = msgs::UpdateFailMalformedHTLC; // (failure_code, sha256_of_onion)
	fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
		msgs::UpdateFailMalformedHTLC {
			htlc_id,
			channel_id,
			failure_code: self.0,
			sha256_of_onion: self.1
		}
	}
	fn to_inbound_htlc_state(self) -> InboundHTLCState {
		InboundHTLCState::LocalRemoved(
			InboundHTLCRemovalReason::FailMalformed((self.1, self.0))
		)
	}
	fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
		HTLCUpdateAwaitingACK::FailMalformedHTLC {
			htlc_id,
			failure_code: self.0,
			sha256_of_onion: self.1
		}
	}
}
trait FailHTLCMessageName {
	fn name() -> &'static str;
}
impl FailHTLCMessageName for msgs::UpdateFailHTLC {
	fn name() -> &'static str {
		"update_fail_htlc"
	}
}
impl FailHTLCMessageName for msgs::UpdateFailMalformedHTLC {
	fn name() -> &'static str {
		"update_fail_malformed_htlc"
	}
}
impl<SP: Deref> Channel<SP> where
	SP::Target: SignerProvider,
	<SP::Target as SignerProvider>::EcdsaSigner: WriteableEcdsaChannelSigner
{
	fn check_remote_fee<F: Deref, L: Deref>(
		channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator<F>,
		feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L
	) -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
	{
		let lower_limit_conf_target = if channel_type.supports_anchors_zero_fee_htlc_tx() {
			ConfirmationTarget::MinAllowedAnchorChannelRemoteFee
		} else {
			ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee
		};
		let lower_limit = fee_estimator.bounded_sat_per_1000_weight(lower_limit_conf_target);
		if feerate_per_kw < lower_limit {
			if let Some(cur_feerate) = cur_feerate_per_kw {
				if feerate_per_kw > cur_feerate {
					log_warn!(logger,
						"Accepting feerate that may prevent us from closing this channel because it's higher than what we have now. Had {} s/kW, now {} s/kW.",
						cur_feerate, feerate_per_kw);
					return Ok(());
				}
			}
			return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit)));
		}
		Ok(())
	}
	fn get_closing_scriptpubkey(&self) -> ScriptBuf {
		// The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
		// is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
		// outside of those situations will panic (via the `unwrap` below).
		self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
	}
	fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
		let mut ret =
		(4 +                                                   // version
		 1 +                                                   // input count
		 36 +                                                  // prevout
		 1 +                                                   // script length (0)
		 4 +                                                   // sequence
		 1 +                                                   // output count
		 4                                                     // lock time
		 )*4 +                                                 // * 4 for non-witness parts
		2 +                                                    // witness marker and flag
		1 +                                                    // witness element count
		4 +                                                    // 4 element lengths (2 sigs, multisig dummy, and witness script)
		self.context.get_funding_redeemscript().len() as u64 + // funding witness script
		2*(1 + 71);                                            // two signatures + sighash type flags
		if let Some(spk) = a_scriptpubkey {
			ret += ((8+1) +                                    // output values and script length
				spk.len() as u64) * 4;                         // scriptpubkey and witness multiplier
		}
		if let Some(spk) = b_scriptpubkey {
			ret += ((8+1) +                                    // output values and script length
				spk.len() as u64) * 4;                         // scriptpubkey and witness multiplier
		}
		ret
	}
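	// Worked example (illustrative): with a standard 71-byte 2-of-2 funding
	// redeemscript and both sides using 22-byte P2WPKH scripts, the non-witness
	// part is (4+1+36+1+4+1+4)*4 = 204 WU, the witness part is
	// 2+1+4+71+2*(1+71) = 222 WU, and each output adds (8+1+22)*4 = 124 WU,
	// for a total of 204 + 222 + 2*124 = 674 weight units.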
	fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
		assert!(self.context.pending_inbound_htlcs.is_empty());
		assert!(self.context.pending_outbound_htlcs.is_empty());
		assert!(self.context.pending_update_fee.is_none());

		let mut total_fee_satoshis = proposed_total_fee_satoshis;
		let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 };
		let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 };

		if value_to_holder < 0 {
			assert!(self.context.is_outbound());
			total_fee_satoshis += (-value_to_holder) as u64;
		} else if value_to_counterparty < 0 {
			assert!(!self.context.is_outbound());
			total_fee_satoshis += (-value_to_counterparty) as u64;
		}

		if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis {
			value_to_counterparty = 0;
		}

		if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis {
			value_to_holder = 0;
		}

		assert!(self.context.shutdown_scriptpubkey.is_some());
		let holder_shutdown_script = self.get_closing_scriptpubkey();
		let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap();
		let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();

		let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
		(closing_transaction, total_fee_satoshis)
	}
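	// Example (illustrative numbers): on a 100_000 sat channel where we are the
	// funder and hold 60_000_000 msat, a proposed total fee of 1_000 sats gives
	// us a 59_000 sat output and our counterparty 40_000 sats - the funder's
	// balance alone bears the closing fee, and any output at or below the dust
	// limit is simply omitted.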
	fn funding_outpoint(&self) -> OutPoint {
		self.context.channel_transaction_parameters.funding_outpoint.unwrap()
	}
	/// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
	/// entirely.
	///
	/// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
	/// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
	///
	/// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is
	/// disconnected).
	pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
		(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
	where L::Target: Logger {
		// Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
		// (see equivalent if condition there).
		assert!(self.context.channel_state.should_force_holding_cell());
		let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
		let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
		self.context.latest_monitor_update_id = mon_update_id;
		if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
			assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
		}
	}
	fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
		// Either ChannelReady got set (which means it won't be unset) or there is no way any
		// caller thought we could have something claimed (cause we wouldn't have accepted an
		// incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
		// either.
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
			panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
		}

		// ChannelManager may generate duplicate claims/fails due to HTLC update events from
		// on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
		// these, but for now we just have to treat them as normal.

		let mut pending_idx = core::usize::MAX;
		let mut htlc_value_msat = 0;
		for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
			if htlc.htlc_id == htlc_id_arg {
				debug_assert_eq!(htlc.payment_hash, PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).to_byte_array()));
				log_debug!(logger, "Claiming inbound HTLC id {} with payment hash {} with preimage {}",
					htlc.htlc_id, htlc.payment_hash, payment_preimage_arg);
				match htlc.state {
					InboundHTLCState::Committed => {},
					InboundHTLCState::LocalRemoved(ref reason) => {
						if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
						} else {
							log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, &self.context.channel_id());
							debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
						}
						return UpdateFulfillFetch::DuplicateClaim {};
					},
					_ => {
						debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
						// Don't return in release mode here so that we can update channel_monitor
					}
				}
				pending_idx = idx;
				htlc_value_msat = htlc.amount_msat;
				break;
			}
		}
		if pending_idx == core::usize::MAX {
			#[cfg(any(test, fuzzing))]
			// If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
			// this is simply a duplicate claim, not previously failed and we lost funds.
			debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
			return UpdateFulfillFetch::DuplicateClaim {};
		}

		// Now update local state:
		//
		// We have to put the payment_preimage in the channel_monitor right away here to ensure we
		// can claim it even if the channel hits the chain before we see their next commitment.
		self.context.latest_monitor_update_id += 1;
		let monitor_update = ChannelMonitorUpdate {
			update_id: self.context.latest_monitor_update_id,
			updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
				payment_preimage: payment_preimage_arg.clone(),
			}],
		};

		if self.context.channel_state.should_force_holding_cell() {
			// Note that this condition is the same as the assertion in
			// `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
			// `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
			// do not get into this branch.
			for pending_update in self.context.holding_cell_htlc_updates.iter() {
				match pending_update {
					&HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
						if htlc_id_arg == htlc_id {
							// Make sure we don't leave latest_monitor_update_id incremented here:
							self.context.latest_monitor_update_id -= 1;
							#[cfg(any(test, fuzzing))]
							debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
							return UpdateFulfillFetch::DuplicateClaim {};
						}
					},
					&HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
						&HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
					{
						if htlc_id_arg == htlc_id {
							log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id());
							// TODO: We may actually be able to switch to a fulfill here, though it's
							// rare enough it may not be worth the complexity burden.
							debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
							return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
						}
					},
					_ => {}
				}
			}
			log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state.to_u32());
			self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
				payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
			});
			#[cfg(any(test, fuzzing))]
			self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
			return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
		}
		#[cfg(any(test, fuzzing))]
		self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);

		{
			let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
			if let InboundHTLCState::Committed = htlc.state {
			} else {
				debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
				return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
			}
			log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, &self.context.channel_id);
			htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
		}

		UpdateFulfillFetch::NewClaim {
			monitor_update,
			htlc_value_msat,
			msg: Some(msgs::UpdateFulfillHTLC {
				channel_id: self.context.channel_id(),
				htlc_id: htlc_id_arg,
				payment_preimage: payment_preimage_arg,
			}),
		}
	}
	pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
		let release_cs_monitor = self.context.blocked_monitor_updates.is_empty();
		match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
			UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
				// Even if we aren't supposed to let new monitor updates with commitment state
				// updates run, we still need to push the preimage ChannelMonitorUpdateStep no
				// matter what. Sadly, to push a new monitor update which flies before others
				// already queued, we have to insert it into the pending queue and update the
				// update_ids of all the following monitors.
				if release_cs_monitor && msg.is_some() {
					let mut additional_update = self.build_commitment_no_status_check(logger);
					// build_commitment_no_status_check may bump latest_monitor_id but we want them
					// to be strictly increasing by one, so decrement it here.
					self.context.latest_monitor_update_id = monitor_update.update_id;
					monitor_update.updates.append(&mut additional_update.updates);
				} else {
					let new_mon_id = self.context.blocked_monitor_updates.get(0)
						.map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
					monitor_update.update_id = new_mon_id;
					for held_update in self.context.blocked_monitor_updates.iter_mut() {
						held_update.update.update_id += 1;
					}
					if msg.is_some() {
						debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
						let update = self.build_commitment_no_status_check(logger);
						self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
							update,
						});
					}
				}

				self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
				UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, }
			},
			UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
		}
	}
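	// Illustrative sequence: with latest_monitor_update_id at 5 and no blocked
	// updates, a fulfill bumps it to 6 and the appended commitment update would
	// bump it to 7, so it is reset and the combined update ships as a single id 6.
	// If updates are blocked, the new preimage update instead takes the first
	// blocked update's id and every queued update is shifted up by one.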
	/// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
	/// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
	/// however, fail more than once as we wait for an upstream failure to be irrevocably committed
	/// before we fail backwards.
	///
	/// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
	/// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
	/// [`ChannelError::Ignore`].
	pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
	-> Result<(), ChannelError> where L::Target: Logger {
		self.fail_htlc(htlc_id_arg, err_packet, true, logger)
			.map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
	}
	/// Used for failing back with [`msgs::UpdateFailMalformedHTLC`]. For now, this is used when we
	/// want to fail blinded HTLCs where we are not the intro node.
	///
	/// See [`Self::queue_fail_htlc`] for more info.
	pub fn queue_fail_malformed_htlc<L: Deref>(
		&mut self, htlc_id_arg: u64, failure_code: u16, sha256_of_onion: [u8; 32], logger: &L
	) -> Result<(), ChannelError> where L::Target: Logger {
		self.fail_htlc(htlc_id_arg, (failure_code, sha256_of_onion), true, logger)
			.map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
	}
	/// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
	/// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
	/// however, fail more than once as we wait for an upstream failure to be irrevocably committed
	/// before we fail backwards.
	///
	/// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
	/// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
	/// [`ChannelError::Ignore`].
	fn fail_htlc<L: Deref, E: FailHTLCContents + Clone>(
		&mut self, htlc_id_arg: u64, err_packet: E, mut force_holding_cell: bool,
		logger: &L
	) -> Result<Option<E::Message>, ChannelError> where L::Target: Logger {
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
			panic!("Was asked to fail an HTLC when channel was not in an operational state");
		}

		// ChannelManager may generate duplicate claims/fails due to HTLC update events from
		// on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
		// these, but for now we just have to treat them as normal.

		let mut pending_idx = core::usize::MAX;
		for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
			if htlc.htlc_id == htlc_id_arg {
				match htlc.state {
					InboundHTLCState::Committed => {},
					InboundHTLCState::LocalRemoved(ref reason) => {
						if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
						} else {
							debug_assert!(false, "Tried to fail an HTLC that was already failed");
						}
						return Ok(None);
					},
					_ => {
						debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
						return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
					}
				}
				pending_idx = idx;
			}
		}
		if pending_idx == core::usize::MAX {
			#[cfg(any(test, fuzzing))]
			// If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
			// is simply a duplicate fail, not previously failed and we failed-back too early.
			debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
			return Ok(None);
		}

		if self.context.channel_state.should_force_holding_cell() {
			debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
			force_holding_cell = true;
		}

		// Now update local state:
		if force_holding_cell {
			for pending_update in self.context.holding_cell_htlc_updates.iter() {
				match pending_update {
					&HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
						if htlc_id_arg == htlc_id {
							#[cfg(any(test, fuzzing))]
							debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
							return Ok(None);
						}
					},
					&HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
						&HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
					{
						if htlc_id_arg == htlc_id {
							debug_assert!(false, "Tried to fail an HTLC that was already failed");
							return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
						}
					},
					_ => {}
				}
			}
			log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id());
			self.context.holding_cell_htlc_updates.push(err_packet.to_htlc_update_awaiting_ack(htlc_id_arg));
			return Ok(None);
		}

		log_trace!(logger, "Failing HTLC ID {} back with {} message in channel {}.", htlc_id_arg,
			E::Message::name(), &self.context.channel_id());
		{
			let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
			htlc.state = err_packet.clone().to_inbound_htlc_state();
		}

		Ok(Some(err_packet.to_message(htlc_id_arg, self.context.channel_id())))
	}
	// Message handlers:

	/// Updates the state of the channel to indicate that all channels in the batch have received
	/// funding_signed and persisted their monitors.
	/// The funding transaction is consequently allowed to be broadcast, and the channel can be
	/// treated as a non-batch channel going forward.
	pub fn set_batch_ready(&mut self) {
		self.context.is_batch_funding = None;
		self.context.channel_state.clear_waiting_for_batch();
	}
	/// Handles a channel_ready message from our peer. If we've already sent our channel_ready
	/// and the channel is now usable (and public), this may generate an announcement_signatures to
	/// reply with.
	pub fn channel_ready<NS: Deref, L: Deref>(
		&mut self, msg: &msgs::ChannelReady, node_signer: &NS, chain_hash: ChainHash,
		user_config: &UserConfig, best_block: &BestBlock, logger: &L
	) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
	where
		NS::Target: NodeSigner,
		L::Target: Logger
	{
		if self.context.channel_state.is_peer_disconnected() {
			self.context.workaround_lnd_bug_4006 = Some(msg.clone());
			return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
		}

		if let Some(scid_alias) = msg.short_channel_id_alias {
			if Some(scid_alias) != self.context.short_channel_id {
				// The scid alias provided can be used to route payments *from* our counterparty,
				// i.e. can be used for inbound payments and provided in invoices, but is not used
				// when routing outbound payments.
				self.context.latest_inbound_scid_alias = Some(scid_alias);
			}
		}

		// Our channel_ready shouldn't have been sent if we are waiting for other channels in the
		// batch, but we can receive channel_ready messages.
		let mut check_reconnection = false;
		match &self.context.channel_state {
			ChannelState::AwaitingChannelReady(flags) => {
				let flags = *flags & !FundedStateFlags::ALL;
				debug_assert!(!flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY) || !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
				if flags & !AwaitingChannelReadyFlags::WAITING_FOR_BATCH == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY {
					// If we reconnected before sending our `channel_ready` they may still resend theirs.
					check_reconnection = true;
				} else if (flags & !AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty() {
					self.context.channel_state.set_their_channel_ready();
				} else if flags == AwaitingChannelReadyFlags::OUR_CHANNEL_READY {
					self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
					self.context.update_time_counter += 1;
				} else {
					// We're in `WAITING_FOR_BATCH`, so we should wait until we're ready.
					debug_assert!(flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
				}
			},
			// If we reconnected before sending our `channel_ready` they may still resend theirs.
			ChannelState::ChannelReady(_) => check_reconnection = true,
			_ => return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned())),
		}
		if check_reconnection {
			// They probably disconnected/reconnected and re-sent the channel_ready, which is
			// required, or they're sending a fresh SCID alias.
			let expected_point =
				if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
					// If they haven't ever sent an updated point, the point they send should match
					// the current one.
					self.context.counterparty_cur_commitment_point
				} else if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
					// If we've advanced the commitment number once, the second commitment point is
					// at `counterparty_prev_commitment_point`, which is not yet revoked.
					debug_assert!(self.context.counterparty_prev_commitment_point.is_some());
					self.context.counterparty_prev_commitment_point
				} else {
					// If they have sent updated points, channel_ready is always supposed to match
					// their "first" point, which we re-derive here.
					Some(PublicKey::from_secret_key(&self.context.secp_ctx, &SecretKey::from_slice(
						&self.context.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
					).expect("We already advanced, so previous secret keys should have been validated already")))
				};
			if expected_point != Some(msg.next_per_commitment_point) {
				return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
			}
			return Ok(None);
		}

		self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
		self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);

		log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id());

		Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger))
	}
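	// Illustrative note on the reconnect check above: commitment numbers count
	// down from INITIAL_COMMITMENT_NUMBER (2^48 - 1). Before any state update the
	// counterparty's channel_ready must carry their current point; after exactly
	// one update it must match the not-yet-revoked previous point; beyond that we
	// can re-derive their "first" point from the revocation secrets they have
	// already handed us.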
	pub fn update_add_htlc<F, FE: Deref, L: Deref>(
		&mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
		create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
	) -> Result<(), ChannelError>
	where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
		FE::Target: FeeEstimator, L::Target: Logger,
	{
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
			return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
		}
		// We can't accept HTLCs sent after we've sent a shutdown.
		if self.context.channel_state.is_local_shutdown_sent() {
			pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
		}
		// If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
		if self.context.channel_state.is_remote_shutdown_sent() {
			return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
		}
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
		}
		if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
			return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
		}
		if msg.amount_msat == 0 {
			return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
		}
		if msg.amount_msat < self.context.holder_htlc_minimum_msat {
			return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
		}

		let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
		let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
		if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
			return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
		}
		if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
			return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
		}

		// Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
		// the reserve_satoshis we told them to always have as direct payment so that they lose
		// something if we punish them for broadcasting an old state).
		// Note that we don't really care about having a small/no to_remote output in our local
		// commitment transactions, as the purpose of the channel reserve is to ensure we can
		// punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
		// present in the next commitment transaction we send them (at least for fulfilled ones,
		// failed ones won't modify value_to_self).
		// Note that we will send HTLCs which another instance of rust-lightning would think
		// violate the reserve value if we do not do this (as we forget inbound HTLCs from the
		// Channel state once they will not be present in the next received commitment
		// transaction).
		let mut removed_outbound_total_msat = 0;
		for ref htlc in self.context.pending_outbound_htlcs.iter() {
			if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
				removed_outbound_total_msat += htlc.amount_msat;
			} else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
				removed_outbound_total_msat += htlc.amount_msat;
			}
		}

		let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
		let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
			(0, 0)
		} else {
			let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
			(dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
				dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
		};
		let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
		if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
			let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
			if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
				log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
					on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
				pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
			}
		}

		let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
		if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
			let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
			if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
				log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
					on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
				pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
			}
		}

		let pending_value_to_self_msat =
			self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
		let pending_remote_value_msat =
			self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
		if pending_remote_value_msat < msg.amount_msat {
			return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
		}

		// Check that the remote can afford to pay for this HTLC on-chain at the current
		// feerate_per_kw, while maintaining their channel reserve (as required by the spec).
		{
			let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else {
				let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
				self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
			};
			let anchor_outputs_value_msat = if !self.context.is_outbound() && self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
				ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
			} else {
				0
			};
			if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(anchor_outputs_value_msat) < remote_commit_tx_fee_msat {
				return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
			}
			if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(remote_commit_tx_fee_msat).saturating_sub(anchor_outputs_value_msat) < self.context.holder_selected_channel_reserve_satoshis * 1000 {
				return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
			}
		}

		let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
			ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
		} else {
			0
		};
		if !self.context.is_outbound() {
			// `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
			// the spec because the fee spike buffer requirement doesn't exist on the receiver's
			// side, only on the sender's. Note that with anchor outputs we are no longer as
			// sensitive to fee spikes, so we don't apply the extra fee spike buffer multiple below.
			let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
			let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
			if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
				remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
			}
			if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
				// Note that if the pending_forward_status is not updated here, then it's because we're already failing
				// the HTLC, i.e. its status is already set to failing.
				log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
				pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
			}
		} else {
			// Check that they won't violate our local required channel reserve by adding this HTLC.
			let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
			let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
			if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat + anchor_outputs_value_msat {
				return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
			}
		}

		if self.context.next_counterparty_htlc_id != msg.htlc_id {
			return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
		}
		if msg.cltv_expiry >= 500000000 {
			return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
		}

		if self.context.channel_state.is_local_shutdown_sent() {
			if let PendingHTLCStatus::Forward(_) = pending_forward_status {
				panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
			}
		}

		// Now update local state:
		self.context.next_counterparty_htlc_id += 1;
		self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
			htlc_id: msg.htlc_id,
			amount_msat: msg.amount_msat,
			payment_hash: msg.payment_hash,
			cltv_expiry: msg.cltv_expiry,
			state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
		});
		Ok(())
	}
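	// Worked dust-exposure example (illustrative numbers, non-anchor channel):
	// with a dust buffer feerate of 3_000 sat/kW, an htlc_timeout_tx weight of
	// 663 WU and a counterparty dust limit of 546 sats, the timeout threshold is
	// 3_000 * 663 / 1000 + 546 = 2_535 sats, so any inbound HTLC under that
	// amount counts toward our on-counterparty-tx dust exposure and gets failed
	// (rather than the channel closed) once the configured cap would be exceeded.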
	/// Marks an outbound HTLC which we have received update_fail/fulfill/malformed
	fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
		assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage");
		for htlc in self.context.pending_outbound_htlcs.iter_mut() {
			if htlc.htlc_id == htlc_id {
				let outcome = match check_preimage {
					None => fail_reason.into(),
					Some(payment_preimage) => {
						let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array());
						if payment_hash != htlc.payment_hash {
							return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
						}
						OutboundHTLCOutcome::Success(Some(payment_preimage))
					}
				};
				match htlc.state {
					OutboundHTLCState::LocalAnnounced(_) =>
						return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
					OutboundHTLCState::Committed => {
						htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
					},
					OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
						return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
				}
				return Ok(htlc);
			}
		}
		Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
	}
	pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
			return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
		}
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
		}

		self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
	}
	pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
			return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
		}
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
		}

		self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
		Ok(())
	}
	pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
			return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
		}
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
		}

		self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
		Ok(())
	}
	pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
		where L::Target: Logger
	{
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
			return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
		}
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
		}
		if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
			return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
		}

		let funding_script = self.context.get_funding_redeemscript();

		let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);

		let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
		let commitment_txid = {
			let trusted_tx = commitment_stats.tx.trust();
			let bitcoin_tx = trusted_tx.built_transaction();
			let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);

			log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
				log_bytes!(msg.signature.serialize_compact()[..]),
				log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
				log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), &self.context.channel_id());
			if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
				return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
			}
			bitcoin_tx.txid
		};
		let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();

		// If our counterparty updated the channel fee in this commitment transaction, check that
		// they can actually afford the new fee now.
		let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee {
			update_state == FeeUpdateState::RemoteAnnounced
		} else { false };
		if update_fee {
			debug_assert!(!self.context.is_outbound());
			let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
			if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
				return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
			}
		}
		#[cfg(any(test, fuzzing))]
		{
			if self.context.is_outbound() {
				let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
				*self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
				if let Some(info) = projected_commit_tx_info {
					let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len()
						+ self.context.holding_cell_htlc_updates.len();
					if info.total_pending_htlcs == total_pending_htlcs
						&& info.next_holder_htlc_id == self.context.next_holder_htlc_id
						&& info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
						&& info.feerate == self.context.feerate_per_kw {
							assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);
						}
				}
			}
		}

		if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
			return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
		}

		// Up to LDK 0.0.115, HTLC information was required to be duplicated in the
		// `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
		// in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
		// outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for
		// backwards compatibility, we never use it in production. To provide test coverage, here,
		// we randomly decide (in test/fuzzing builds) to use the new vec sometimes.
		#[allow(unused_assignments, unused_mut)]
		let mut separate_nondust_htlc_sources = false;
		#[cfg(all(feature = "std", any(test, fuzzing)))] {
			use core::hash::{BuildHasher, Hasher};
			// Get a random value using the only std API to do so - the DefaultHasher
			let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
			separate_nondust_htlc_sources = rand_val % 2 == 0;
		}

		let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
		let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
		for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
			if let Some(_) = htlc.transaction_output_index {
				let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
					self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, &self.context.channel_type,
					&keys.broadcaster_delayed_payment_key, &keys.revocation_key);

				let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &keys);
				let htlc_sighashtype = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
				let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
				log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
					log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.to_public_key().serialize()),
					encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), &self.context.channel_id());
				if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key.to_public_key()) {
					return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
				}
				if !separate_nondust_htlc_sources {
					htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
				}
			} else {
				htlcs_and_sigs.push((htlc, None, source_opt.take()));
			}
			if separate_nondust_htlc_sources {
				if let Some(source) = source_opt.take() {
					nondust_htlc_sources.push(source);
				}
			}
			debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
		}

		let holder_commitment_tx = HolderCommitmentTransaction::new(
			commitment_stats.tx,
			msg.signature,
			msg.htlc_signatures.clone(),
			&self.context.get_holder_pubkeys().funding_pubkey,
			self.context.counterparty_funding_pubkey()
		);

		self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.outbound_htlc_preimages)
			.map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;

		// Update state now that we've passed all the can-fail calls...
		let mut need_commitment = false;
		if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee {
			if *update_state == FeeUpdateState::RemoteAnnounced {
				*update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
				need_commitment = true;
			}
		}

		for htlc in self.context.pending_inbound_htlcs.iter_mut() {
			let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
				Some(forward_info.clone())
			} else { None };
			if let Some(forward_info) = new_forward {
				log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
					&htlc.payment_hash, &self.context.channel_id);
				htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
				need_commitment = true;
			}
		}
		let mut claimed_htlcs = Vec::new();
		for htlc in self.context.pending_outbound_htlcs.iter_mut() {
			if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
				log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
					&htlc.payment_hash, &self.context.channel_id);
				// Grab the preimage, if it exists, instead of cloning
				let mut reason = OutboundHTLCOutcome::Success(None);
				mem::swap(outcome, &mut reason);
				if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
					// If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
					// upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
					// have a `Success(None)` reason. In this case we could forget some HTLC
					// claims, but such an upgrade is unlikely and including claimed HTLCs here
					// fixes a bug which the user was exposed to on 0.0.104 when they started the
					// claim anyway.
					claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
				}
				htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
				need_commitment = true;
			}
		}

		self.context.latest_monitor_update_id += 1;
		let mut monitor_update = ChannelMonitorUpdate {
			update_id: self.context.latest_monitor_update_id,
			updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
				commitment_tx: holder_commitment_tx,
				htlc_outputs: htlcs_and_sigs,
				claimed_htlcs,
				nondust_htlc_sources,
			}]
		};

		self.context.cur_holder_commitment_transaction_number -= 1;
		self.context.expecting_peer_commitment_signed = false;
		// Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
		// build_commitment_no_status_check() next which will reset this to RAAFirst.
		self.context.resend_order = RAACommitmentOrder::CommitmentFirst;

		if self.context.channel_state.is_monitor_update_in_progress() {
			// In case we initially failed monitor updating without requiring a response, we need
			// to make sure the RAA gets sent first.
			self.context.monitor_pending_revoke_and_ack = true;
			if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
				// If we were going to send a commitment_signed after the RAA, go ahead and do all
				// the corresponding HTLC status updates so that
				// get_last_commitment_update_for_send includes the right HTLCs.
				self.context.monitor_pending_commitment_signed = true;
				let mut additional_update = self.build_commitment_no_status_check(logger);
				// build_commitment_no_status_check may bump latest_monitor_id but we want them to be
				// strictly increasing by one, so decrement it here.
				self.context.latest_monitor_update_id = monitor_update.update_id;
				monitor_update.updates.append(&mut additional_update.updates);
			}
			log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
				&self.context.channel_id);
			return Ok(self.push_ret_blockable_mon_update(monitor_update));
		}

		let need_commitment_signed = if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
			// If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
			// we'll send one right away when we get the revoke_and_ack when we
			// free_holding_cell_htlcs().
			let mut additional_update = self.build_commitment_no_status_check(logger);
			// build_commitment_no_status_check may bump latest_monitor_id but we want them to be
			// strictly increasing by one, so decrement it here.
			self.context.latest_monitor_update_id = monitor_update.update_id;
			monitor_update.updates.append(&mut additional_update.updates);
			true
		} else { false };

		log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
			&self.context.channel_id(), if need_commitment_signed { " our own commitment_signed and" } else { "" });
		self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
		return Ok(self.push_ret_blockable_mon_update(monitor_update));
	}
	/// Public version of the below, checking relevant preconditions first.
	/// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
	/// returns `(None, Vec::new())`.
	pub fn maybe_free_holding_cell_htlcs<F: Deref, L: Deref>(
		&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
	) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
	where F::Target: FeeEstimator, L::Target: Logger
	{
		if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && !self.context.channel_state.should_force_holding_cell() {
			self.free_holding_cell_htlcs(fee_estimator, logger)
		} else { (None, Vec::new()) }
	}
	/// Frees any pending commitment updates in the holding cell, generating the relevant messages
	/// for our counterparty.
	fn free_holding_cell_htlcs<F: Deref, L: Deref>(
		&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
	) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
	where F::Target: FeeEstimator, L::Target: Logger
	{
		assert!(!self.context.channel_state.is_monitor_update_in_progress());
		if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
			log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
				if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id());

			let mut monitor_update = ChannelMonitorUpdate {
				update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
				updates: Vec::new(),
			};

			let mut htlc_updates = Vec::new();
			mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
			let mut update_add_count = 0;
			let mut update_fulfill_count = 0;
			let mut update_fail_count = 0;
			let mut htlcs_to_fail = Vec::new();
			for htlc_update in htlc_updates.drain(..) {
				// Note that this *can* fail, though it should be due to rather-rare conditions on
				// fee races with adding too many outputs which push our total payments just over
				// the limit. In case it's less rare than I anticipate, we may want to revisit
				// handling this case better and maybe fulfilling some of the HTLCs while attempting
				// to rebalance channels.
				match &htlc_update {
					&HTLCUpdateAwaitingACK::AddHTLC {
						amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
						skimmed_fee_msat, blinding_point, ..
					} => {
						match self.send_htlc(
							amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(),
							false, skimmed_fee_msat, blinding_point, fee_estimator, logger
						) {
							Ok(_) => update_add_count += 1,
							Err(e) => {
								match e {
									ChannelError::Ignore(ref msg) => {
										log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", &payment_hash, msg, &self.context.channel_id());
										// If we fail to send here, then this HTLC should
										// be failed backwards. Failing to send here
										// indicates that this HTLC may keep being put back
										// into the holding cell without ever being
										// successfully forwarded/failed/fulfilled, causing
										// our counterparty to eventually close on us.
										htlcs_to_fail.push((source.clone(), *payment_hash));
									},
									_ => {
										panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
									},
								}
							}
						}
					},
					&HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
						// If an HTLC claim was previously added to the holding cell (via
						// `get_update_fulfill_htlc`), then generating the claim message itself must
						// not fail - any in between attempts to claim the HTLC will have resulted
						// in it hitting the holding cell again and we cannot change the state of a
						// holding cell HTLC from fulfill to anything else.
						let mut additional_monitor_update =
							if let UpdateFulfillFetch::NewClaim { monitor_update, .. } =
								self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger)
							{ monitor_update } else { unreachable!() };
						update_fulfill_count += 1;
						monitor_update.updates.append(&mut additional_monitor_update.updates);
					},
					&HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
						match self.fail_htlc(htlc_id, err_packet.clone(), false, logger) {
							Ok(update_fail_msg_option) => {
								// If an HTLC failure was previously added to the holding cell (via
								// `queue_fail_htlc`) then generating the fail message itself must
								// not fail - we should never end up in a state where we double-fail
								// an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
								// for a full revocation before failing.
								debug_assert!(update_fail_msg_option.is_some());
								update_fail_count += 1;
							},
							Err(e) => {
								if let ChannelError::Ignore(_) = e {}
								else {
									panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
								}
							}
						}
					},
					&HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
						match self.fail_htlc(htlc_id, (failure_code, sha256_of_onion), false, logger) {
							Ok(update_fail_malformed_opt) => {
								debug_assert!(update_fail_malformed_opt.is_some()); // See above comment
								update_fail_count += 1;
							},
							Err(e) => {
								if let ChannelError::Ignore(_) = e {}
								else {
									panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
								}
							}
						}
					},
				}
			}
			if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && self.context.holding_cell_update_fee.is_none() {
				return (None, htlcs_to_fail);
			}
			let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
				self.send_update_fee(feerate, false, fee_estimator, logger)
			} else {
				None
			};

			let mut additional_update = self.build_commitment_no_status_check(logger);
			// build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_id
			// but we want them to be strictly increasing by one, so reset it here.
			self.context.latest_monitor_update_id = monitor_update.update_id;
			monitor_update.updates.append(&mut additional_update.updates);

			log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
				&self.context.channel_id(), if update_fee.is_some() { "a fee update, " } else { "" },
				update_add_count, update_fulfill_count, update_fail_count);

			self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
			(self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
		} else {
			(None, Vec::new())
		}
	}
3682 /// Handles receiving a remote's revoke_and_ack. Note that we may return a new
3683 /// commitment_signed message here in case we had pending outbound HTLCs to add which were
3684 /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
3685 /// generating an appropriate error *after* the channel state has been updated based on the
3686 /// revoke_and_ack message.
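///
/// A minimal, illustrative usage sketch (hedged; `channel`, `msg`, `fee_estimator` and
/// `logger` are assumed caller bindings, not items defined in this file):
///
/// ```ignore
/// let (htlcs_to_fail, monitor_update_opt) =
///     channel.revoke_and_ack(&msg, &fee_estimator, &logger, false)?;
/// for (source, payment_hash) in htlcs_to_fail {
///     // Fail these HTLCs backwards; they were freed from the holding cell but
///     // could not be sent.
/// }
/// if let Some(monitor_update) = monitor_update_opt {
///     // Persist the update before acting on any generated messages.
/// }
/// ```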
3687 pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK,
3688 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L, hold_mon_update: bool,
3689 ) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
3690 where F::Target: FeeEstimator, L::Target: Logger,
3692 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3693 return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
3695 if self.context.channel_state.is_peer_disconnected() {
3696 return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
3698 if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
3699 return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
3702 let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());
3704 if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
3705 if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
3706 return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
3710 if !self.context.channel_state.is_awaiting_remote_revoke() {
3711 // Our counterparty seems to have burned their coins to us (by revoking a state when we
3712 // haven't given them a new commitment transaction to broadcast). We should probably
3713 // take advantage of this by updating our channel monitor, sending them an error, and
3714 // waiting for them to broadcast their latest (now-revoked) claim. But, that would be a
3715 // lot of work, and there's some chance this is all a misunderstanding anyway.
3716 // We have to do *something*, though, since our signer may get mad at us for otherwise
3717 // jumping a remote commitment number, so best to just force-close and move on.
3718 return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
3721 #[cfg(any(test, fuzzing))]
3723 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
3724 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
3727 match &self.context.holder_signer {
3728 ChannelSignerType::Ecdsa(ecdsa) => {
3729 ecdsa.validate_counterparty_revocation(
3730 self.context.cur_counterparty_commitment_transaction_number + 1,
3732 ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
3734 // TODO (taproot|arik)
3739 self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
3740 .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
3741 self.context.latest_monitor_update_id += 1;
3742 let mut monitor_update = ChannelMonitorUpdate {
3743 update_id: self.context.latest_monitor_update_id,
3744 updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
3745 idx: self.context.cur_counterparty_commitment_transaction_number + 1,
3746 secret: msg.per_commitment_secret,
3750 // Update state now that we've passed all the can-fail calls...
3751 // (note that we may still fail to generate the new commitment_signed message, but that's
3752 // OK, we step the channel here and *then* if the new generation fails we can fail the
3753 // channel based on that, but stepping stuff here should be safe either way.)
3754 self.context.channel_state.clear_awaiting_remote_revoke();
3755 self.context.sent_message_awaiting_response = None;
3756 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
3757 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
3758 self.context.cur_counterparty_commitment_transaction_number -= 1;
3760 if self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
3761 self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
3764 log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id());
3765 let mut to_forward_infos = Vec::new();
3766 let mut revoked_htlcs = Vec::new();
3767 let mut finalized_claimed_htlcs = Vec::new();
3768 let mut update_fail_htlcs = Vec::new();
3769 let mut update_fail_malformed_htlcs = Vec::new();
3770 let mut require_commitment = false;
3771 let mut value_to_self_msat_diff: i64 = 0;
3774 // Take references explicitly so that we can hold multiple references to self.context.
3775 let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
3776 let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;
3777 let expecting_peer_commitment_signed = &mut self.context.expecting_peer_commitment_signed;
3779 // We really shouldn't have two passes here, but retain gives a non-mutable ref (Rust bug)
3780 pending_inbound_htlcs.retain(|htlc| {
3781 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
3782 log_trace!(logger, " ...removing inbound LocalRemoved {}", &htlc.payment_hash);
3783 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
3784 value_to_self_msat_diff += htlc.amount_msat as i64;
3786 *expecting_peer_commitment_signed = true;
3790 pending_outbound_htlcs.retain(|htlc| {
3791 if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
3792 log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", &htlc.payment_hash);
3793 if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
3794 revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
3796 finalized_claimed_htlcs.push(htlc.source.clone());
3797 // They fulfilled, so we sent them money
3798 value_to_self_msat_diff -= htlc.amount_msat as i64;
3803 for htlc in pending_inbound_htlcs.iter_mut() {
3804 let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
3806 } else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
3810 let mut state = InboundHTLCState::Committed;
3811 mem::swap(&mut state, &mut htlc.state);
3813 if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
3814 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
3815 htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
3816 require_commitment = true;
3817 } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
3818 match forward_info {
3819 PendingHTLCStatus::Fail(fail_msg) => {
3820 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash);
3821 require_commitment = true;
3823 HTLCFailureMsg::Relay(msg) => {
3824 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
3825 update_fail_htlcs.push(msg)
3827 HTLCFailureMsg::Malformed(msg) => {
3828 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
3829 update_fail_malformed_htlcs.push(msg)
3833 PendingHTLCStatus::Forward(forward_info) => {
3834 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash);
3835 to_forward_infos.push((forward_info, htlc.htlc_id));
3836 htlc.state = InboundHTLCState::Committed;
3842 for htlc in pending_outbound_htlcs.iter_mut() {
3843 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
3844 log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", &htlc.payment_hash);
3845 htlc.state = OutboundHTLCState::Committed;
3846 *expecting_peer_commitment_signed = true;
3848 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
3849 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
3850 // Grab the preimage, if it exists, instead of cloning
3851 let mut reason = OutboundHTLCOutcome::Success(None);
3852 mem::swap(outcome, &mut reason);
3853 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
3854 require_commitment = true;
3858 self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;
3860 if let Some((feerate, update_state)) = self.context.pending_update_fee {
3861 match update_state {
3862 FeeUpdateState::Outbound => {
3863 debug_assert!(self.context.is_outbound());
3864 log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
3865 self.context.feerate_per_kw = feerate;
3866 self.context.pending_update_fee = None;
3867 self.context.expecting_peer_commitment_signed = true;
3869 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); },
3870 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
3871 debug_assert!(!self.context.is_outbound());
3872 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
3873 require_commitment = true;
3874 self.context.feerate_per_kw = feerate;
3875 self.context.pending_update_fee = None;
3880 let release_monitor = self.context.blocked_monitor_updates.is_empty() && !hold_mon_update;
3881 let release_state_str =
3882 if hold_mon_update { "Holding" } else if release_monitor { "Releasing" } else { "Blocked" };
3883 macro_rules! return_with_htlcs_to_fail {
3884 ($htlcs_to_fail: expr) => {
3885 if !release_monitor {
3886 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
3887 update: monitor_update,
3889 return Ok(($htlcs_to_fail, None));
3891 return Ok(($htlcs_to_fail, Some(monitor_update)));
3896 if self.context.channel_state.is_monitor_update_in_progress() {
3897 // We can't actually generate a new commitment transaction (including by freeing holding
3898 // cell HTLCs) while we can't update the monitor, so we just return what we have.
3899 if require_commitment {
3900 self.context.monitor_pending_commitment_signed = true;
3901 // When the monitor updating is restored we'll call
3902 // get_last_commitment_update_for_send(), which does not update state, but we're
3903 // definitely now awaiting a remote revoke before we can step forward any more, so
3904 // set it here.
3905 let mut additional_update = self.build_commitment_no_status_check(logger);
3906 // build_commitment_no_status_check may bump latest_monitor_update_id but we want update
3907 // ids to be strictly increasing by one, so reset it here.
3908 self.context.latest_monitor_update_id = monitor_update.update_id;
3909 monitor_update.updates.append(&mut additional_update.updates);
3911 self.context.monitor_pending_forwards.append(&mut to_forward_infos);
3912 self.context.monitor_pending_failures.append(&mut revoked_htlcs);
3913 self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
3914 log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", &self.context.channel_id());
3915 return_with_htlcs_to_fail!(Vec::new());
3918 match self.free_holding_cell_htlcs(fee_estimator, logger) {
3919 (Some(mut additional_update), htlcs_to_fail) => {
3920 // free_holding_cell_htlcs may bump latest_monitor_update_id multiple times but we want
3921 // update ids to be strictly increasing by one, so reset it here.
3922 self.context.latest_monitor_update_id = monitor_update.update_id;
3923 monitor_update.updates.append(&mut additional_update.updates);
3925 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with holding cell HTLCs freed. {} monitor update.",
3926 &self.context.channel_id(), release_state_str);
3928 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3929 return_with_htlcs_to_fail!(htlcs_to_fail);
3931 (None, htlcs_to_fail) => {
3932 if require_commitment {
3933 let mut additional_update = self.build_commitment_no_status_check(logger);
3935 // build_commitment_no_status_check may bump latest_monitor_update_id but we want update
3936 // ids to be strictly increasing by one, so reset it here.
3937 self.context.latest_monitor_update_id = monitor_update.update_id;
3938 monitor_update.updates.append(&mut additional_update.updates);
3940 log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed. {} monitor update.",
3941 &self.context.channel_id(),
3942 update_fail_htlcs.len() + update_fail_malformed_htlcs.len(),
3945 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3946 return_with_htlcs_to_fail!(htlcs_to_fail);
3948 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary. {} monitor update.",
3949 &self.context.channel_id(), release_state_str);
3951 self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3952 return_with_htlcs_to_fail!(htlcs_to_fail);
3958 /// Queues up an outbound update fee by placing it in the holding cell. You should call
3959 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
3960 /// commitment update.
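///
/// A hedged sketch of the intended call pattern (`channel`, `fee_estimator` and `logger`
/// are assumed caller bindings):
///
/// ```ignore
/// channel.queue_update_fee(new_feerate_per_kw, &fee_estimator, &logger);
/// // Later, once any pending monitor updates/revocations have cleared:
/// let (monitor_update_opt, htlcs_to_fail) =
///     channel.maybe_free_holding_cell_htlcs(&fee_estimator, &logger);
/// ```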
3961 pub fn queue_update_fee<F: Deref, L: Deref>(&mut self, feerate_per_kw: u32,
3962 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
3963 where F::Target: FeeEstimator, L::Target: Logger
3965 let msg_opt = self.send_update_fee(feerate_per_kw, true, fee_estimator, logger);
3966 assert!(msg_opt.is_none(), "We forced holding cell?");
3969 /// Adds a pending update to this channel. See the doc for send_htlc for
3970 /// further details on why the return value is an `Option`.
3971 /// If our balance is too low to cover the cost of the next commitment transaction at the
3972 /// new feerate, the update is cancelled.
3974 /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
3975 /// [`Channel`] if `force_holding_cell` is false.
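///
/// Conceptually, the affordability check in the body below is (hedged pseudocode; names
/// mirror the locals used there):
///
/// ```ignore
/// let buffer_fee_msat = commit_tx_fee_sat(
///     new_feerate, nondust_htlcs + holding_cell_htlcs + CONCURRENT_INBOUND_HTLC_FEE_BUFFER,
///     channel_type) * 1000;
/// if holder_balance_msat < buffer_fee_msat + counterparty_selected_reserve_sat * 1000 {
///     // Keep the old feerate; we cannot afford the new one.
/// }
/// ```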
3976 fn send_update_fee<F: Deref, L: Deref>(
3977 &mut self, feerate_per_kw: u32, mut force_holding_cell: bool,
3978 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3979 ) -> Option<msgs::UpdateFee>
3980 where F::Target: FeeEstimator, L::Target: Logger
3982 if !self.context.is_outbound() {
3983 panic!("Cannot send fee from inbound channel");
3985 if !self.context.is_usable() {
3986 panic!("Cannot update fee until channel is fully established and we haven't started shutting down");
3988 if !self.context.is_live() {
3989 panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
3992 // Before proposing a feerate update, check that we can actually afford the new fee.
3993 let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
3994 let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
3995 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
3996 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
3997 let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000;
3998 let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
3999 if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
4000 //TODO: auto-close after a number of failures?
4001 log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
4005 // Note that we evaluate the pending-HTLC "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
4006 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
4007 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
4008 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
4009 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
4010 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
4013 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
4014 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
4018 if self.context.channel_state.is_awaiting_remote_revoke() || self.context.channel_state.is_monitor_update_in_progress() {
4019 force_holding_cell = true;
4022 if force_holding_cell {
4023 self.context.holding_cell_update_fee = Some(feerate_per_kw);
4027 debug_assert!(self.context.pending_update_fee.is_none());
4028 self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));
4030 Some(msgs::UpdateFee {
4031 channel_id: self.context.channel_id,
4036 /// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
4037 /// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be
4038 /// resent.
4039 /// No further message handling calls may be made until a channel_reestablish dance has
4040 /// completed.
4041 /// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately.
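///
/// A hedged sketch of the disconnect path (`channel` and `logger` are assumed bindings):
///
/// ```ignore
/// if channel.remove_uncommitted_htlcs_and_mark_paused(&logger).is_err() {
///     // Pre-funding state: the channel should be force-shut-down instead.
/// }
/// // All message handling now errors until a channel_reestablish round trip completes.
/// ```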
4042 pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger {
4043 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
4044 if self.context.channel_state.is_pre_funded_state() {
4048 if self.context.channel_state.is_peer_disconnected() {
4049 // While the below code should be idempotent, it's simpler to just return early, as
4050 // redundant disconnect events can fire, though they should be rare.
4054 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
4055 self.context.announcement_sigs_state = AnnouncementSigsState::NotSent;
4058 // Upon reconnect we have to start the closing_signed dance over, but shutdown messages
4059 // will be retransmitted.
4060 self.context.last_sent_closing_fee = None;
4061 self.context.pending_counterparty_closing_signed = None;
4062 self.context.closing_fee_limits = None;
4064 let mut inbound_drop_count = 0;
4065 self.context.pending_inbound_htlcs.retain(|htlc| {
4067 InboundHTLCState::RemoteAnnounced(_) => {
4068 // They sent us an update_add_htlc but we never got the commitment_signed.
4069 // We'll tell them what commitment_signed we're expecting next and they'll drop
4070 // this HTLC accordingly
4071 inbound_drop_count += 1;
4074 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
4075 // We received a commitment_signed updating this HTLC and (at least hopefully)
4076 // sent a revoke_and_ack (which we can re-transmit) and have heard nothing
4077 // in response to it yet, so don't touch it.
4080 InboundHTLCState::Committed => true,
4081 InboundHTLCState::LocalRemoved(_) => {
4082 // We (hopefully) sent a commitment_signed updating this HTLC (which we can
4083 // re-transmit if needed) and they may have even sent a revoke_and_ack back
4084 // (that we missed). Keep this around for now and if they tell us they missed
4085 // the commitment_signed we can re-transmit the update then.
4090 self.context.next_counterparty_htlc_id -= inbound_drop_count;
4092 if let Some((_, update_state)) = self.context.pending_update_fee {
4093 if update_state == FeeUpdateState::RemoteAnnounced {
4094 debug_assert!(!self.context.is_outbound());
4095 self.context.pending_update_fee = None;
4099 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
4100 if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
4101 // They sent us an update to remove this but haven't yet sent the corresponding
4102 // commitment_signed, we need to move it back to Committed and they can re-send
4103 // the update upon reconnection.
4104 htlc.state = OutboundHTLCState::Committed;
4108 self.context.sent_message_awaiting_response = None;
4110 self.context.channel_state.set_peer_disconnected();
4111 log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id());
4115 /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
4116 /// This must be called before we return the [`ChannelMonitorUpdate`] back to the
4117 /// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
4118 /// update completes (potentially immediately).
4119 /// The messages which were generated with the monitor update must *not* have been sent to the
4120 /// remote end, and must instead have been dropped. They will be regenerated when
4121 /// [`Self::monitor_updating_restored`] is called.
4123 /// [`ChannelManager`]: super::channelmanager::ChannelManager
4124 /// [`chain::Watch`]: crate::chain::Watch
4125 /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
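///
/// A hedged sketch of the pause half of the monitor-update round trip:
///
/// ```ignore
/// // Queue the RAA/commitment for regeneration rather than sending them now:
/// self.monitor_updating_paused(true, true, false, Vec::new(), Vec::new(), Vec::new());
/// // Then hand the ChannelMonitorUpdate to the ChannelManager for persistence.
/// ```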
4126 fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
4127 resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
4128 mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
4129 mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
4131 self.context.monitor_pending_revoke_and_ack |= resend_raa;
4132 self.context.monitor_pending_commitment_signed |= resend_commitment;
4133 self.context.monitor_pending_channel_ready |= resend_channel_ready;
4134 self.context.monitor_pending_forwards.append(&mut pending_forwards);
4135 self.context.monitor_pending_failures.append(&mut pending_fails);
4136 self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
4137 self.context.channel_state.set_monitor_update_in_progress();
4140 /// Indicates that the latest ChannelMonitor update has been committed by the client
4141 /// successfully and we should restore normal operation. Returns messages which should be sent
4142 /// to the remote side.
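///
/// A hedged sketch of consuming the result (field names as returned below):
///
/// ```ignore
/// let updates = channel.monitor_updating_restored(&logger, &node_signer, chain_hash,
///     &user_config, best_block_height);
/// match updates.order {
///     RAACommitmentOrder::CommitmentFirst => { /* send commitment_update, then raa */ }
///     RAACommitmentOrder::RevokeAndACKFirst => { /* send raa, then commitment_update */ }
/// }
/// ```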
4143 pub fn monitor_updating_restored<L: Deref, NS: Deref>(
4144 &mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash,
4145 user_config: &UserConfig, best_block_height: u32
4146 ) -> MonitorRestoreUpdates
4149 NS::Target: NodeSigner
4151 assert!(self.context.channel_state.is_monitor_update_in_progress());
4152 self.context.channel_state.clear_monitor_update_in_progress();
4154 // If we're past (or at) the AwaitingChannelReady stage on an outbound channel, try to
4155 // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
4156 // first received the funding_signed.
4157 let mut funding_broadcastable =
4158 if self.context.is_outbound() &&
4159 matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH)) ||
4160 matches!(self.context.channel_state, ChannelState::ChannelReady(_))
4162 self.context.funding_transaction.take()
4164 // That said, if the funding transaction is already confirmed (ie we're active with a
4165 // minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
4166 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && self.context.minimum_depth != Some(0) {
4167 funding_broadcastable = None;
4170 // We will never broadcast the funding transaction when we're in MonitorUpdateInProgress
4171 // (and we assume the user never directly broadcasts the funding transaction and waits for
4172 // us to do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
4173 // * an inbound channel that failed to persist the monitor on funding_created and we got
4174 // the funding transaction confirmed before the monitor was persisted, or
4175 // * a 0-conf channel where we intended to send the channel_ready before any broadcast at all.
4176 let channel_ready = if self.context.monitor_pending_channel_ready {
4177 assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
4178 "Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
4179 self.context.monitor_pending_channel_ready = false;
4180 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4181 Some(msgs::ChannelReady {
4182 channel_id: self.context.channel_id(),
4183 next_per_commitment_point,
4184 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4188 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger);
4190 let mut accepted_htlcs = Vec::new();
4191 mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards);
4192 let mut failed_htlcs = Vec::new();
4193 mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
4194 let mut finalized_claimed_htlcs = Vec::new();
4195 mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);
4197 if self.context.channel_state.is_peer_disconnected() {
4198 self.context.monitor_pending_revoke_and_ack = false;
4199 self.context.monitor_pending_commitment_signed = false;
4200 return MonitorRestoreUpdates {
4201 raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
4202 accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
4206 let raa = if self.context.monitor_pending_revoke_and_ack {
4207 Some(self.get_last_revoke_and_ack())
4209 let commitment_update = if self.context.monitor_pending_commitment_signed {
4210 self.get_last_commitment_update_for_send(logger).ok()
4212 if commitment_update.is_some() {
4213 self.mark_awaiting_response();
4216 self.context.monitor_pending_revoke_and_ack = false;
4217 self.context.monitor_pending_commitment_signed = false;
4218 let order = self.context.resend_order.clone();
4219 log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
4220 &self.context.channel_id(), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
4221 if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
4222 match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
4223 MonitorRestoreUpdates {
4224 raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
4228 pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
4229 where F::Target: FeeEstimator, L::Target: Logger
4231 if self.context.is_outbound() {
4232 return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
4234 if self.context.channel_state.is_peer_disconnected() {
4235 return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
4237 Channel::<SP>::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
4239 self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
4240 self.context.update_time_counter += 1;
4241 // Check that we won't be pushed over our dust exposure limit by the feerate increase.
4242 if !self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
4243 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
4244 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
4245 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
4246 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
4247 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
4248 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
4249 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
4250 msg.feerate_per_kw, holder_tx_dust_exposure)));
4252 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
4253 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
4254 msg.feerate_per_kw, counterparty_tx_dust_exposure)));
4261 /// Indicates that the signer may have some signatures for us, so we should retry if we're
4262 /// blocked.
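///
/// A hedged usage sketch (`channel` and `logger` are assumed bindings):
///
/// ```ignore
/// let updates = channel.signer_maybe_unblocked(&logger);
/// if let Some(commitment_update) = updates.commitment_update {
///     // Send the regenerated commitment_update to the peer.
/// }
/// ```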
4263 pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger {
4264 let commitment_update = if self.context.signer_pending_commitment_update {
4265 self.get_last_commitment_update_for_send(logger).ok()
4267 let funding_signed = if self.context.signer_pending_funding && !self.context.is_outbound() {
4268 self.context.get_funding_signed_msg(logger).1
4270 let channel_ready = if funding_signed.is_some() {
4271 self.check_get_channel_ready(0)
4274 log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed and {} channel_ready",
4275 if commitment_update.is_some() { "a" } else { "no" },
4276 if funding_signed.is_some() { "a" } else { "no" },
4277 if channel_ready.is_some() { "a" } else { "no" });
4279 SignerResumeUpdates {
4286 fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
4287 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4288 let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
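// Hedged note on the indexing above: commitment numbers count *down* from
// INITIAL_COMMITMENT_NUMBER, so `cur_holder_commitment_transaction_number + 2`
// names an older, already-revoked commitment whose secret we (re-)release, while
// the point for `cur_holder_commitment_transaction_number` is the one our peer
// needs for the next commitment.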
4289 msgs::RevokeAndACK {
4290 channel_id: self.context.channel_id,
4291 per_commitment_secret,
4292 next_per_commitment_point,
4294 next_local_nonce: None,
4298 /// Gets the last commitment update for immediate sending to our peer.
4299 fn get_last_commitment_update_for_send<L: Deref>(&mut self, logger: &L) -> Result<msgs::CommitmentUpdate, ()> where L::Target: Logger {
4300 let mut update_add_htlcs = Vec::new();
4301 let mut update_fulfill_htlcs = Vec::new();
4302 let mut update_fail_htlcs = Vec::new();
4303 let mut update_fail_malformed_htlcs = Vec::new();
4305 for htlc in self.context.pending_outbound_htlcs.iter() {
4306 if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
4307 update_add_htlcs.push(msgs::UpdateAddHTLC {
4308 channel_id: self.context.channel_id(),
4309 htlc_id: htlc.htlc_id,
4310 amount_msat: htlc.amount_msat,
4311 payment_hash: htlc.payment_hash,
4312 cltv_expiry: htlc.cltv_expiry,
4313 onion_routing_packet: (**onion_packet).clone(),
4314 skimmed_fee_msat: htlc.skimmed_fee_msat,
4315 blinding_point: htlc.blinding_point,
4320 for htlc in self.context.pending_inbound_htlcs.iter() {
4321 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
4323 &InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
4324 update_fail_htlcs.push(msgs::UpdateFailHTLC {
4325 channel_id: self.context.channel_id(),
4326 htlc_id: htlc.htlc_id,
4327 reason: err_packet.clone()
4330 &InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
4331 update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
4332 channel_id: self.context.channel_id(),
4333 htlc_id: htlc.htlc_id,
4334 sha256_of_onion: sha256_of_onion.clone(),
4335 failure_code: failure_code.clone(),
4338 &InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
4339 update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
4340 channel_id: self.context.channel_id(),
4341 htlc_id: htlc.htlc_id,
4342 payment_preimage: payment_preimage.clone(),
4349 let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() {
4350 Some(msgs::UpdateFee {
4351 channel_id: self.context.channel_id(),
4352 feerate_per_kw: self.context.pending_update_fee.unwrap().0,
4356 log_trace!(logger, "Regenerating latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
4357 &self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" },
4358 update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
4359 let commitment_signed = if let Ok(update) = self.send_commitment_no_state_update(logger).map(|(cu, _)| cu) {
4360 if self.context.signer_pending_commitment_update {
4361 log_trace!(logger, "Commitment update generated: clearing signer_pending_commitment_update");
4362 self.context.signer_pending_commitment_update = false;
4366 if !self.context.signer_pending_commitment_update {
4367 log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
4368 self.context.signer_pending_commitment_update = true;
4372 Ok(msgs::CommitmentUpdate {
4373 update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
4378 /// Gets the `Shutdown` message we should send our peer on reconnect, if any.
4379 pub fn get_outbound_shutdown(&self) -> Option<msgs::Shutdown> {
4380 if self.context.channel_state.is_local_shutdown_sent() {
4381 assert!(self.context.shutdown_scriptpubkey.is_some());
4382 Some(msgs::Shutdown {
4383 channel_id: self.context.channel_id,
4384 scriptpubkey: self.get_closing_scriptpubkey(),
4389 /// May panic if some calls other than message-handling calls (which will all Err immediately)
4390 /// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
4392 /// Some links printed in log lines are included here to check them during build (when run with
4393 /// `cargo doc --document-private-items`):
4394 /// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
4395 /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
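///
/// A hedged sketch of acting on the response (`resp` fields per [`ReestablishResponses`]):
///
/// ```ignore
/// let resp = channel.channel_reestablish(&msg, &logger, &node_signer, chain_hash,
///     &user_config, &best_block)?;
/// // Send resp.channel_ready / resp.shutdown_msg if present, then resp.raa and
/// // resp.commitment_update in the order given by resp.order.
/// ```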
4396 pub fn channel_reestablish<L: Deref, NS: Deref>(
4397 &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
4398 chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock
4399 ) -> Result<ReestablishResponses, ChannelError>
4402 NS::Target: NodeSigner
4404 if !self.context.channel_state.is_peer_disconnected() {
4405 // While BOLT 2 doesn't indicate explicitly we should error this channel here, it
4406 // almost certainly indicates we are going to end up out-of-sync in some way, so we
4407 // just close here instead of trying to recover.
4408 return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
4411 if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
4412 msg.next_local_commitment_number == 0 {
4413 return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned()));
4416 let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
4417 if msg.next_remote_commitment_number > 0 {
4418 let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
4419 let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
4420 .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
4421 if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
4422 return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
4424 if msg.next_remote_commitment_number > our_commitment_transaction {
4425 macro_rules! log_and_panic {
4426 ($err_msg: expr) => {
4427 log_error!(logger, $err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4428 panic!($err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4431 log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
4432 This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
4433 More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
4434 If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
4435 ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
4436 ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
4437 Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
4438 See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
4442 // Before we change the state of the channel, we check if the peer is sending a very old
4443 // commitment transaction number; if so, we send a warning message.
4444 if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
4445 return Err(ChannelError::Warn(format!(
4446 "Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)",
4447 msg.next_remote_commitment_number,
4448 our_commitment_transaction
4452 // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
4453 // remaining cases either succeed or ErrorMessage-fail).
4454 self.context.channel_state.clear_peer_disconnected();
4455 self.context.sent_message_awaiting_response = None;
4457 let shutdown_msg = self.get_outbound_shutdown();
4459 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger);
4461 if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(_)) {
4462 // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
4463 if !self.context.channel_state.is_our_channel_ready() ||
4464 self.context.channel_state.is_monitor_update_in_progress() {
4465 if msg.next_remote_commitment_number != 0 {
4466 return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
4468 // Short circuit the whole handler as there is nothing we can resend them
4469 return Ok(ReestablishResponses {
4470 channel_ready: None,
4471 raa: None, commitment_update: None,
4472 order: RAACommitmentOrder::CommitmentFirst,
4473 shutdown_msg, announcement_sigs,
4477 // We have OurChannelReady set!
4478 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4479 return Ok(ReestablishResponses {
4480 channel_ready: Some(msgs::ChannelReady {
4481 channel_id: self.context.channel_id(),
4482 next_per_commitment_point,
4483 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4485 raa: None, commitment_update: None,
4486 order: RAACommitmentOrder::CommitmentFirst,
4487 shutdown_msg, announcement_sigs,
4491 let required_revoke = if msg.next_remote_commitment_number == our_commitment_transaction {
4492 // Remote isn't waiting on any RevokeAndACK from us!
4493 // Note that if we need to repeat our ChannelReady we'll do that in the next if block.
4495 } else if msg.next_remote_commitment_number + 1 == our_commitment_transaction {
4496 if self.context.channel_state.is_monitor_update_in_progress() {
4497 self.context.monitor_pending_revoke_and_ack = true;
4500 Some(self.get_last_revoke_and_ack())
4503 debug_assert!(false, "All values should have been handled in the four cases above");
4504 return Err(ChannelError::Close(format!(
4505 "Peer attempted to reestablish channel expecting a future local commitment transaction: {} (received) vs {} (expected)",
4506 msg.next_remote_commitment_number,
4507 our_commitment_transaction
4511 // We increment cur_counterparty_commitment_transaction_number only upon receipt of
4512 // revoke_and_ack, not on sending commitment_signed, so we add one if have
4513 // AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
4514 // the corresponding revoke_and_ack back yet.
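// Worked example (hedged): with no commitment_signed outstanding, the peer's
// next_local_commitment_number should equal
// INITIAL_COMMITMENT_NUMBER - cur_counterparty_commitment_transaction_number;
// with one outstanding (AwaitingRemoteRevoke set), it should be one greater.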
4515 let is_awaiting_remote_revoke = self.context.channel_state.is_awaiting_remote_revoke();
4516 if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
4517 self.mark_awaiting_response();
4519 let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };
4521 let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 {
4522 // We should never have to worry about MonitorUpdateInProgress resending ChannelReady
4523 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4524 Some(msgs::ChannelReady {
4525 channel_id: self.context.channel_id(),
4526 next_per_commitment_point,
4527 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4531 if msg.next_local_commitment_number == next_counterparty_commitment_number {
4532 if required_revoke.is_some() {
4533 log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id());
4535 log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id());
4538 Ok(ReestablishResponses {
4539 channel_ready, shutdown_msg, announcement_sigs,
4540 raa: required_revoke,
4541 commitment_update: None,
4542 order: self.context.resend_order.clone(),
4544 } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
4545 if required_revoke.is_some() {
4546 log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id());
4548 log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id());
4551 if self.context.channel_state.is_monitor_update_in_progress() {
4552 self.context.monitor_pending_commitment_signed = true;
4553 Ok(ReestablishResponses {
4554 channel_ready, shutdown_msg, announcement_sigs,
4555 commitment_update: None, raa: None,
4556 order: self.context.resend_order.clone(),
4559 Ok(ReestablishResponses {
4560 channel_ready, shutdown_msg, announcement_sigs,
4561 raa: required_revoke,
4562 commitment_update: self.get_last_commitment_update_for_send(logger).ok(),
4563 order: self.context.resend_order.clone(),
4566 } else if msg.next_local_commitment_number < next_counterparty_commitment_number {
4567 Err(ChannelError::Close(format!(
4568 "Peer attempted to reestablish channel with a very old remote commitment transaction: {} (received) vs {} (expected)",
4569 msg.next_local_commitment_number,
4570 next_counterparty_commitment_number,
4573 Err(ChannelError::Close(format!(
4574 "Peer attempted to reestablish channel with a future remote commitment transaction: {} (received) vs {} (expected)",
4575 msg.next_local_commitment_number,
4576 next_counterparty_commitment_number,
4581 /// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole
4582 /// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart,
4583 /// at which point they will be recalculated.
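///
/// A hedged sketch of how the limits seed the negotiation:
///
/// ```ignore
/// let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(&fee_estimator);
/// // Our initial closing_signed proposes our_min_fee; we accept counter-proposals
/// // up to our_max_fee before giving up and force-closing.
/// ```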
4584 fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>)
4586 where F::Target: FeeEstimator
4588 if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); }
4590 // Propose a range from our current ChannelCloseMinimum feerate to our NonAnchorChannelFee
4591 // feerate plus our force_close_avoidance_max_fee_satoshis.
4592 // If we fail to come to consensus, we'll have to force-close.
4593 let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::ChannelCloseMinimum);
4594 // Use NonAnchorChannelFee because this should be an estimate for a channel close
4595 // that we don't expect to need fee bumping
4596 let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
4597 let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() };
4599 // The spec requires that (when the channel does not have anchors) we only send absolute
4600 // channel fees no greater than the absolute channel fee on the current commitment
4601 // transaction. It's unclear *which* commitment transaction this refers to, and there isn't
4602 // a very good reason to apply such a limit in any case. We don't bother doing so, risking
4603 // some force-closure by old nodes, but we wanted to close the channel anyway.
4605 if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw {
4606 let min_feerate = if self.context.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) };
4607 proposed_feerate = cmp::max(proposed_feerate, min_feerate);
4608 proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate);
4611 // Note that technically we could end up with a lower minimum fee if one side's balance is
4612 // below our dust limit, causing the output to disappear. We don't bother handling this
4613 // case, however, as this should only happen if a channel is closed before any (material)
4614 // payments have been made on it. This may cause slight fee overpayment and/or failure to
4615 // come to consensus with our counterparty on appropriate fees, however it should be a
4616 // relatively rare case. We can revisit this later, though note that in order to determine
4617 // if the funder's output is dust we have to know the absolute fee we're going to use.
4618 let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
4619 let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
4620 let proposed_max_total_fee_satoshis = if self.context.is_outbound() {
4621 // We always add force_close_avoidance_max_fee_satoshis to our normal
4622 // feerate-calculated fee, but allow the max to be overridden if we're using a
4623 // target feerate-calculated fee.
4624 cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.context.config.options.force_close_avoidance_max_fee_satoshis,
4625 proposed_max_feerate as u64 * tx_weight / 1000)
4627 self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000
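// Hedged note: `(value_to_self_msat + 999) / 1000` is ceiling division from msat to
// sat, so for inbound channels the max fee is capped at the funder's balance,
// rounded down to whole satoshis.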
4630 self.context.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis));
4631 self.context.closing_fee_limits.clone().unwrap()
4634 /// Returns true if we're ready to commence the closing_signed negotiation phase. This is true
4635 /// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At
4636 /// this point if we're the funder we should send the initial closing_signed, and in any case
4637 /// shutdown should complete within a reasonable timeframe.
4638 fn closing_negotiation_ready(&self) -> bool {
4639 self.context.closing_negotiation_ready()
4642 /// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
4643 /// an Err if no progress is being made and the channel should be force-closed instead.
4644 /// Should be called on a one-minute timer.
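///
/// A hedged sketch of the timer hook (`channel` is an assumed binding):
///
/// ```ignore
/// // Once per minute:
/// if let Err(e) = channel.timer_check_closing_negotiation_progress() {
///     // Negotiation stalled for two ticks; force-close with reason `e`.
/// }
/// ```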
4645 pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
4646 if self.closing_negotiation_ready() {
4647 if self.context.closing_signed_in_flight {
4648 return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
4650 self.context.closing_signed_in_flight = true;
4656 pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
4657 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
4658 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
4659 where F::Target: FeeEstimator, L::Target: Logger
4661 // If we're waiting on a monitor persistence, that implies we're also waiting to send some
4662 // message to our counterparty (probably a `revoke_and_ack`). In such a case, we shouldn't
4663 // initiate `closing_signed` negotiation until we're clear of all pending messages. Note
4664 // that closing_negotiation_ready checks this case (as well as a few others).
4665 if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
4666 return Ok((None, None, None));
4669 if !self.context.is_outbound() {
4670 if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() {
4671 return self.closing_signed(fee_estimator, &msg);
4673 return Ok((None, None, None));
4676 // If we're waiting on a counterparty `commitment_signed` to clear some updates from our
4677 // local commitment transaction, we can't yet initiate `closing_signed` negotiation.
4678 if self.context.expecting_peer_commitment_signed {
4679 return Ok((None, None, None));
4682 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
4684 assert!(self.context.shutdown_scriptpubkey.is_some());
4685 let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false);
4686 log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)",
4687 our_min_fee, our_max_fee, total_fee_satoshis);
4689 match &self.context.holder_signer {
4690 ChannelSignerType::Ecdsa(ecdsa) => {
4692 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
4693 .map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;
4695 self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
4696 Ok((Some(msgs::ClosingSigned {
4697 channel_id: self.context.channel_id,
4698 fee_satoshis: total_fee_satoshis,
4700 fee_range: Some(msgs::ClosingSignedFeeRange {
4701 min_fee_satoshis: our_min_fee,
4702 max_fee_satoshis: our_max_fee,
4706 // TODO (taproot|arik)
4712 // Marks a channel as waiting for a response from the counterparty. If it's not received
4713 // [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] after sending our own to them, then we'll attempt
4714 // a reconnection.
4715 fn mark_awaiting_response(&mut self) {
4716 self.context.sent_message_awaiting_response = Some(0);
4719 /// Determines whether we should disconnect the counterparty due to not receiving a response
4720 /// within our expected timeframe.
4722 /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
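///
/// A hedged usage sketch (`channel` is an assumed binding):
///
/// ```ignore
/// // On each timer tick:
/// if channel.should_disconnect_peer_awaiting_response() {
///     // Disconnect the peer; reconnection will run the channel_reestablish dance.
/// }
/// ```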
4723 pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool {
4724 let ticks_elapsed = if let Some(ticks_elapsed) = self.context.sent_message_awaiting_response.as_mut() {
4727 // Don't disconnect when we're not waiting on a response.
4730 *ticks_elapsed += 1;
4731 *ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
4735 &mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
4736 ) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
		}
		if self.context.channel_state.is_pre_funded_state() {
			// Spec says we should fail the connection, not the channel, but that's nonsense, there
			// are plenty of reasons you may want to fail a channel pre-funding, and spec says you
			// can do that via error message without getting a connection fail anyway...
			return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
		}
		for htlc in self.context.pending_inbound_htlcs.iter() {
			if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
				return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
			}
		}
		assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));

		if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
			return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_hex_string())));
		}

		if self.context.counterparty_shutdown_scriptpubkey.is_some() {
			if Some(&msg.scriptpubkey) != self.context.counterparty_shutdown_scriptpubkey.as_ref() {
				return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_hex_string())));
			}
		} else {
			self.context.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
		}

		// If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
		// immediately after the commitment dance, but we can send a Shutdown because we won't send
		// any further commitment updates after we set LocalShutdownSent.
		let send_shutdown = !self.context.channel_state.is_local_shutdown_sent();

		let update_shutdown_script = match self.context.shutdown_scriptpubkey {
			Some(_) => false,
			None => {
				assert!(send_shutdown);
				let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
					Ok(scriptpubkey) => scriptpubkey,
					Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
				};
				if !shutdown_scriptpubkey.is_compatible(their_features) {
					return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
				}
				self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
				true
			},
		};

		// From here on out, we may not fail!

		self.context.channel_state.set_remote_shutdown_sent();
		self.context.update_time_counter += 1;

		let monitor_update = if update_shutdown_script {
			self.context.latest_monitor_update_id += 1;
			let monitor_update = ChannelMonitorUpdate {
				update_id: self.context.latest_monitor_update_id,
				updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
					scriptpubkey: self.get_closing_scriptpubkey(),
				}],
			};
			self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
			self.push_ret_blockable_mon_update(monitor_update)
		} else { None };
		let shutdown = if send_shutdown {
			Some(msgs::Shutdown {
				channel_id: self.context.channel_id,
				scriptpubkey: self.get_closing_scriptpubkey(),
			})
		} else { None };

		// We can't send our shutdown until we've committed all of our pending HTLCs, but the
		// remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
		// cell HTLCs and return them to fail the payment.
		self.context.holding_cell_update_fee = None;
		let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
		self.context.holding_cell_htlc_updates.retain(|htlc_update| {
			match htlc_update {
				&HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
					dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
					false
				},
				_ => true
			}
		});

		self.context.channel_state.set_local_shutdown_sent();
		self.context.update_time_counter += 1;

		Ok((shutdown, monitor_update, dropped_outbound_htlcs))
	}
	fn build_signed_closing_transaction(&self, closing_tx: &ClosingTransaction, counterparty_sig: &Signature, sig: &Signature) -> Transaction {
		let mut tx = closing_tx.trust().built_transaction().clone();

		tx.input[0].witness.push(Vec::new()); // First is the multisig dummy

		let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize();
		let counterparty_funding_key = self.context.counterparty_funding_pubkey().serialize();
		let mut holder_sig = sig.serialize_der().to_vec();
		holder_sig.push(EcdsaSighashType::All as u8);
		let mut cp_sig = counterparty_sig.serialize_der().to_vec();
		cp_sig.push(EcdsaSighashType::All as u8);
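		// Per BOLT 3, the funding redeemscript lists the two funding pubkeys in ascending
		// lexicographic order, and OP_CHECKMULTISIG consumes signatures in that same order,
		// so we compare the serialized keys below to decide which signature is pushed first.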
		if funding_key[..] < counterparty_funding_key[..] {
			tx.input[0].witness.push(holder_sig);
			tx.input[0].witness.push(cp_sig);
		} else {
			tx.input[0].witness.push(cp_sig);
			tx.input[0].witness.push(holder_sig);
		}

		tx.input[0].witness.push(self.context.get_funding_redeemscript().into_bytes());
		tx
	}
	pub fn closing_signed<F: Deref>(
		&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned)
		-> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
		where F::Target: FeeEstimator
	{
		if !self.context.channel_state.is_both_sides_shutdown() {
			return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
		}
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
		}
		if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
			return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
		}
		if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
			return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
		}

		if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() {
			return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
		}

		if self.context.channel_state.is_monitor_update_in_progress() {
			self.context.pending_counterparty_closing_signed = Some(msg.clone());
			return Ok((None, None, None));
		}

		let funding_redeemscript = self.context.get_funding_redeemscript();
		let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
		if used_total_fee != msg.fee_satoshis {
			return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
		}
		let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);

		match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
			Ok(_) => {},
			Err(_e) => {
				// The remote end may have decided to revoke their output due to inconsistent dust
				// limits, so check for that case by re-checking the signature here.
				closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0;
				let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
				secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.context.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
			},
		};
		for outp in closing_tx.trust().built_transaction().output.iter() {
			if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
				return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
			}
		}

		assert!(self.context.shutdown_scriptpubkey.is_some());
		if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
			if last_fee == msg.fee_satoshis {
				let shutdown_result = ShutdownResult {
					monitor_update: None,
					dropped_outbound_htlcs: Vec::new(),
					unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
					channel_id: self.context.channel_id,
					counterparty_node_id: self.context.counterparty_node_id,
				};
				let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
				self.context.channel_state = ChannelState::ShutdownComplete;
				self.context.update_time_counter += 1;
				return Ok((None, Some(tx), Some(shutdown_result)));
			}
		}
		let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);

		macro_rules! propose_fee {
			($new_fee: expr) => {
				let (closing_tx, used_fee) = if $new_fee == msg.fee_satoshis {
					(closing_tx, $new_fee)
				} else {
					self.build_closing_transaction($new_fee, false)
				};

				return match &self.context.holder_signer {
					ChannelSignerType::Ecdsa(ecdsa) => {
						let sig = ecdsa
							.sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
							.map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
						let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
							let shutdown_result = ShutdownResult {
								monitor_update: None,
								dropped_outbound_htlcs: Vec::new(),
								unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
								channel_id: self.context.channel_id,
								counterparty_node_id: self.context.counterparty_node_id,
							};
							self.context.channel_state = ChannelState::ShutdownComplete;
							self.context.update_time_counter += 1;
							let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
							(Some(tx), Some(shutdown_result))
						} else {
							(None, None)
						};

						self.context.last_sent_closing_fee = Some((used_fee, sig.clone()));
						Ok((Some(msgs::ClosingSigned {
							channel_id: self.context.channel_id,
							fee_satoshis: used_fee,
							signature: sig,
							fee_range: Some(msgs::ClosingSignedFeeRange {
								min_fee_satoshis: our_min_fee,
								max_fee_satoshis: our_max_fee,
							}),
						}), signed_tx, shutdown_result))
					},
					// TODO (taproot|arik)
					#[cfg(taproot)]
					_ => todo!()
				}
			}
		}
		if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range {
			if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis {
				return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
			}
			if max_fee_satoshis < our_min_fee {
				return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee)));
			}
			if min_fee_satoshis > our_max_fee {
				return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee)));
			}
			if !self.context.is_outbound() {
				// They have to pay, so pick the highest fee in the overlapping range.
				// We should never set an upper bound aside from their full balance
				debug_assert_eq!(our_max_fee, self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000);
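				// ((value_to_self_msat + 999) / 1000 rounds our msat balance up to whole sats,
				// e.g. 1_500 msat -> 2 sat, so the cap is everything not provably ours.)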
				propose_fee!(cmp::min(max_fee_satoshis, our_max_fee));
			} else {
				if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee {
					return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
						msg.fee_satoshis, our_min_fee, our_max_fee)));
				}
				// The proposed fee is in our acceptable range, accept it and broadcast!
				propose_fee!(msg.fee_satoshis);
			}
		} else {
			// Old fee style negotiation. We don't bother to enforce whether they are complying
			// with the "making progress" requirements, we just comply and hope for the best.
			if let Some((last_fee, _)) = self.context.last_sent_closing_fee {
				if msg.fee_satoshis > last_fee {
					if msg.fee_satoshis < our_max_fee {
						propose_fee!(msg.fee_satoshis);
					} else if last_fee < our_max_fee {
						propose_fee!(our_max_fee);
					} else {
						return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
					}
				} else {
					if msg.fee_satoshis > our_min_fee {
						propose_fee!(msg.fee_satoshis);
					} else if last_fee > our_min_fee {
						propose_fee!(our_min_fee);
					} else {
						return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
					}
				}
			} else {
				if msg.fee_satoshis < our_min_fee {
					propose_fee!(our_min_fee);
				} else if msg.fee_satoshis > our_max_fee {
					propose_fee!(our_max_fee);
				} else {
					propose_fee!(msg.fee_satoshis);
				}
			}
		}
	}
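	// Worked example of the negotiation above (illustrative numbers): if our acceptable range is
	// [our_min_fee = 500 sat, our_max_fee = 2_000 sat] and we're the funder responding to a peer
	// proposing 1_000 sat, the proposal is within our range, so propose_fee!(1_000) signs at that
	// fee and completes the close; a legacy peer proposing 300 sat with no fee_range is instead
	// countered with propose_fee!(500), and negotiation continues until one side matches the
	// other's last proposed fee.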
	fn internal_htlc_satisfies_config(
		&self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig,
	) -> Result<(), (&'static str, u16)> {
		let fee = amt_to_forward.checked_mul(config.forwarding_fee_proportional_millionths as u64)
			.and_then(|prop_fee| (prop_fee / 1000000).checked_add(config.forwarding_fee_base_msat as u64));
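		// I.e. fee_msat = forwarding_fee_base_msat + amt_to_forward * proportional_millionths / 1_000_000.
		// Worked example (illustrative numbers): forwarding 100_000 msat with a 1_000 msat base fee
		// and 100 ppm costs 1_000 + (100_000 * 100) / 1_000_000 = 1_010 msat, so the inbound HTLC
		// must carry at least 101_010 msat for the checks below to pass.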
		if fee.is_none() || htlc.amount_msat < fee.unwrap() ||
			(htlc.amount_msat - fee.unwrap()) < amt_to_forward {
			return Err((
				"Prior hop has deviated from specified fee parameters or origin node has obsolete ones",
				0x1000 | 12, // fee_insufficient
			));
		}
		if (htlc.cltv_expiry as u64) < outgoing_cltv_value as u64 + config.cltv_expiry_delta as u64 {
			return Err((
				"Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
				0x1000 | 13, // incorrect_cltv_expiry
			));
		}
		Ok(())
	}
	/// Determines whether the parameters of an incoming HTLC to be forwarded satisfy the channel's
	/// [`ChannelConfig`]. This first looks at the channel's current [`ChannelConfig`], and if
	/// unsuccessful, falls back to the previous one if one exists.
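	///
	/// Example (illustrative sketch; `chan` and the onion-derived values are hypothetical):
	/// ```ignore
	/// match chan.htlc_satisfies_config(&update_add_htlc, amt_to_forward, outgoing_cltv_value) {
	///     Ok(()) => { /* safe to forward over this channel */ },
	///     // `code` is a BOLT 4 failure code, e.g. 0x1000 | 12 (fee_insufficient).
	///     Err((err_msg, code)) => { /* fail the HTLC backwards with `code` */ },
	/// }
	/// ```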
	pub fn htlc_satisfies_config(
		&self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32,
	) -> Result<(), (&'static str, u16)> {
		self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.context.config())
			.or_else(|err| {
				if let Some(prev_config) = self.context.prev_config() {
					self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &prev_config)
				} else {
					Err(err)
				}
			})
	}
	pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
		self.context.cur_holder_commitment_transaction_number + 1
	}

	pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
		self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state.is_awaiting_remote_revoke() { 1 } else { 0 }
	}

	pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
		self.context.cur_counterparty_commitment_transaction_number + 2
	}
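	// Note on the arithmetic above: the `cur_*_commitment_transaction_number` fields count *down*
	// from INITIAL_COMMITMENT_NUMBER and track the commitment we're about to build, so the latest
	// fully-exchanged commitment sits at index + 1 and the most recently revoked counterparty
	// commitment at index + 2.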
	#[cfg(test)]
	pub fn get_signer(&self) -> &ChannelSignerType<SP> {
		&self.context.holder_signer
	}

	#[cfg(test)]
	pub fn get_value_stat(&self) -> ChannelValueStat {
		ChannelValueStat {
			value_to_self_msat: self.context.value_to_self_msat,
			channel_value_msat: self.context.channel_value_satoshis * 1000,
			channel_reserve_msat: self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000,
			pending_outbound_htlcs_amount_msat: self.context.pending_outbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
			pending_inbound_htlcs_amount_msat: self.context.pending_inbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
			holding_cell_outbound_amount_msat: {
				let mut res = 0;
				for h in self.context.holding_cell_htlc_updates.iter() {
					match h {
						&HTLCUpdateAwaitingACK::AddHTLC{amount_msat, .. } => {
							res += amount_msat;
						},
						_ => {}
					}
				}
				res
			},
			counterparty_max_htlc_value_in_flight_msat: self.context.counterparty_max_htlc_value_in_flight_msat,
			counterparty_dust_limit_msat: self.context.counterparty_dust_limit_satoshis * 1000,
		}
	}
	/// Returns true if this channel has been marked as awaiting a monitor update to move forward.
	/// Allowed in any state (including after shutdown)
	pub fn is_awaiting_monitor_update(&self) -> bool {
		self.context.channel_state.is_monitor_update_in_progress()
	}

	/// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
	pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 {
		if self.context.blocked_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
		self.context.blocked_monitor_updates[0].update.update_id - 1
	}
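	// E.g. if updates with IDs 5 and 6 are queued as blocked, this returns 4 (one below the first
	// blocked update), regardless of the highest update ID ever assigned.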
	/// Returns the next blocked monitor update, if one exists, and a bool which indicates a
	/// further blocked monitor update exists after the next.
	pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(ChannelMonitorUpdate, bool)> {
		if self.context.blocked_monitor_updates.is_empty() { return None; }
		Some((self.context.blocked_monitor_updates.remove(0).update,
			!self.context.blocked_monitor_updates.is_empty()))
	}
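	// Illustrative usage (hypothetical caller): once whatever blocked the queue resolves, pop the
	// next update and hand it to the chain monitor, using the returned bool to decide whether to
	// repeat:
	//   if let Some((update, more_blocked)) = chan.unblock_next_blocked_monitor_update() { ... }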
	/// Pushes a new monitor update into our monitor update queue, returning it if it should be
	/// immediately given to the user for persisting or `None` if it should be held as blocked.
	fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
	-> Option<ChannelMonitorUpdate> {
		let release_monitor = self.context.blocked_monitor_updates.is_empty();
		if !release_monitor {
			self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
				update,
			});
			None
		} else {
			Some(update)
		}
	}

	pub fn blocked_monitor_updates_pending(&self) -> usize {
		self.context.blocked_monitor_updates.len()
	}
	/// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
	/// If the channel is outbound, this implies we have not yet broadcasted the funding
	/// transaction. If the channel is inbound, this implies simply that the channel has not
	/// advanced state.
	pub fn is_awaiting_initial_mon_persist(&self) -> bool {
		if !self.is_awaiting_monitor_update() { return false; }
		if matches!(
			self.context.channel_state, ChannelState::AwaitingChannelReady(flags)
			if (flags & !(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY | FundedStateFlags::PEER_DISCONNECTED | FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS | AwaitingChannelReadyFlags::WAITING_FOR_BATCH)).is_empty()
		) {
			// If we're not a 0conf channel, we'll be waiting on a monitor update with only
			// AwaitingChannelReady set, though our peer could have sent their channel_ready.
			debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0);
			return true;
		}
		if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
			self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
			// If we're a 0-conf channel, we'll move beyond AwaitingChannelReady immediately even while
			// waiting for the initial monitor persistence. Thus, we check if our commitment
			// transaction numbers have both been iterated only exactly once (for the
			// funding_signed), and we're awaiting monitor update.
			//
			// If we got here, we shouldn't have yet broadcasted the funding transaction (as the
			// only way to get an awaiting-monitor-update state during initial funding is if the
			// initial monitor persistence is still pending).
			//
			// Because deciding we're awaiting initial broadcast spuriously could result in
			// funds-loss (as we don't have a monitor, but have the funding transaction confirmed),
			// we hard-assert here, even in production builds.
			if self.context.is_outbound() { assert!(self.context.funding_transaction.is_some()); }
			assert!(self.context.monitor_pending_channel_ready);
			assert_eq!(self.context.latest_monitor_update_id, 0);
			return true;
		}
		false
	}
	/// Returns true if our channel_ready has been sent
	pub fn is_our_channel_ready(&self) -> bool {
		matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY)) ||
			matches!(self.context.channel_state, ChannelState::ChannelReady(_))
	}

	/// Returns true if our peer has either initiated or agreed to shut down the channel.
	pub fn received_shutdown(&self) -> bool {
		self.context.channel_state.is_remote_shutdown_sent()
	}

	/// Returns true if we either initiated or agreed to shut down the channel.
	pub fn sent_shutdown(&self) -> bool {
		self.context.channel_state.is_local_shutdown_sent()
	}

	/// Returns true if this channel is fully shut down. True here implies that no further actions
	/// may/will be taken on this channel, and thus this object should be freed. Any future changes
	/// will be handled appropriately by the chain monitor.
	pub fn is_shutdown(&self) -> bool {
		matches!(self.context.channel_state, ChannelState::ShutdownComplete)
	}

	pub fn channel_update_status(&self) -> ChannelUpdateStatus {
		self.context.channel_update_status
	}

	pub fn set_channel_update_status(&mut self, status: ChannelUpdateStatus) {
		self.context.update_time_counter += 1;
		self.context.channel_update_status = status;
	}
	fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
		// Called:
		//  * always when a new block/transactions are confirmed with the new height
		//  * when funding is signed with a height of 0
		if self.context.funding_tx_confirmation_height == 0 && self.context.minimum_depth != Some(0) {
			return None;
		}

		let funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
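		// E.g. a funding tx confirmed at height 100 has 102 - 100 + 1 = 3 confirmations once the
		// chain tip is at height 102; a non-positive value below means it was reorged back out.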
		if funding_tx_confirmations <= 0 {
			self.context.funding_tx_confirmation_height = 0;
		}

		if funding_tx_confirmations < self.context.minimum_depth.unwrap_or(0) as i64 {
			return None;
		}

		// If we're still pending the signature on a funding transaction, then we're not ready to send a
		// channel_ready yet.
		if self.context.signer_pending_funding {
			return None;
		}

		// Note that we don't include ChannelState::WaitingForBatch as we don't want to send
		// channel_ready until the entire batch is ready.
		let need_commitment_update = if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if (f & !FundedStateFlags::ALL).is_empty()) {
			self.context.channel_state.set_our_channel_ready();
			true
		} else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f & !FundedStateFlags::ALL == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY) {
			self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
			self.context.update_time_counter += 1;
			true
		} else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f & !FundedStateFlags::ALL == AwaitingChannelReadyFlags::OUR_CHANNEL_READY) {
			// We got a reorg but not enough to trigger a force close, just ignore.
			false
		} else {
			if self.context.funding_tx_confirmation_height != 0 &&
				self.context.channel_state < ChannelState::ChannelReady(ChannelReadyFlags::new())
			{
				// We should never see a funding transaction on-chain until we've received
				// funding_signed (if we're an outbound channel), or seen funding_generated (if we're
				// an inbound channel - before that we have no known funding TXID). The fuzzer,
				// however, may do this and we shouldn't treat it as a bug.
				#[cfg(not(fuzzing))]
				panic!("Started confirming a channel in a state pre-AwaitingChannelReady: {}.\n\
					Do NOT broadcast a funding transaction manually - let LDK do it for you!",
					self.context.channel_state.to_u32());
			}
			// We got a reorg but not enough to trigger a force close, just ignore.
			false
		};

		if need_commitment_update {
			if !self.context.channel_state.is_monitor_update_in_progress() {
				if !self.context.channel_state.is_peer_disconnected() {
					let next_per_commitment_point =
						self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
					return Some(msgs::ChannelReady {
						channel_id: self.context.channel_id,
						next_per_commitment_point,
						short_channel_id_alias: Some(self.context.outbound_scid_alias),
					});
				}
			} else {
				self.context.monitor_pending_channel_ready = true;
			}
		}
		None
	}
	/// When a transaction is confirmed, we check whether it is or spends the funding transaction.
	/// In the first case, we store the confirmation height and calculate the short channel id.
	/// In the second, we simply return an Err indicating we need to be force-closed now.
	pub fn transactions_confirmed<NS: Deref, L: Deref>(
		&mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData,
		chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L
	) -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason>
	where
		NS::Target: NodeSigner,
		L::Target: Logger
	{
		let mut msgs = (None, None);
		if let Some(funding_txo) = self.context.get_funding_txo() {
			for &(index_in_block, tx) in txdata.iter() {
				// Check if the transaction is the expected funding transaction, and if it is,
				// check that it pays the right amount to the right script.
				if self.context.funding_tx_confirmation_height == 0 {
					if tx.txid() == funding_txo.txid {
						let txo_idx = funding_txo.index as usize;
						if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_v0_p2wsh() ||
								tx.output[txo_idx].value != self.context.channel_value_satoshis {
							if self.context.is_outbound() {
								// If we generated the funding transaction and it doesn't match what it
								// should, the client is really broken and we should just panic and
								// tell them off. That said, because hash collisions happen with high
								// probability in fuzzing mode, if we're fuzzing we just close the
								// channel and move on.
								#[cfg(not(fuzzing))]
								panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
							}
							self.context.update_time_counter += 1;
							let err_reason = "funding tx had wrong script/value or output index";
							return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() });
						} else {
							if self.context.is_outbound() {
								if !tx.is_coin_base() {
									for input in tx.input.iter() {
										if input.witness.is_empty() {
											// We generated a malleable funding transaction, implying we've
											// just exposed ourselves to funds loss to our counterparty.
											#[cfg(not(fuzzing))]
											panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
										}
									}
								}
							}
							self.context.funding_tx_confirmation_height = height;
							self.context.funding_tx_confirmed_in = Some(*block_hash);
							self.context.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
								Ok(scid) => Some(scid),
								Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
							};
						}
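						// (BOLT 7 packs the scid as (block_height << 40) | (tx_index << 16) | vout,
						// hence the 2^24 height/tx-index and 2^16 output-index limits in the panic
						// message above.)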
						// If this is a coinbase transaction and not a 0-conf channel
						// we should update our min_depth to 100 to handle coinbase maturity
						if tx.is_coin_base() &&
							self.context.minimum_depth.unwrap_or(0) > 0 &&
							self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
							self.context.minimum_depth = Some(COINBASE_MATURITY);
						}
					}
					// If we allow 1-conf funding, we may need to check for channel_ready here and
					// send it immediately instead of waiting for a best_block_updated call (which
					// may have already happened for this block).
					if let Some(channel_ready) = self.check_get_channel_ready(height) {
						log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
						let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger);
						msgs = (Some(channel_ready), announcement_sigs);
					}
				}
				for inp in tx.input.iter() {
					if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
						log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, &self.context.channel_id());
						return Err(ClosureReason::CommitmentTxConfirmed);
					}
				}
			}
		}
		Ok(msgs)
	}
	/// When a new block is connected, we check the height of the block against outbound holding
	/// cell HTLCs in case we need to give up on them prematurely and time them out. Everything
	/// else (e.g. commitment transaction broadcasts, HTLC transaction broadcasting, etc) is
	/// handled by the ChannelMonitor.
	///
	/// If we return Err, the channel may have been closed, at which point the standard
	/// requirements apply - no calls may be made except those explicitly stated to be allowed
	/// post-shutdown.
	///
	/// May return some HTLCs (and their payment_hash) which have timed out and should be failed
	/// back.
	pub fn best_block_updated<NS: Deref, L: Deref>(
		&mut self, height: u32, highest_header_time: u32, chain_hash: ChainHash,
		node_signer: &NS, user_config: &UserConfig, logger: &L
	) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
	where
		NS::Target: NodeSigner,
		L::Target: Logger
	{
		self.do_best_block_updated(height, highest_header_time, Some((chain_hash, node_signer, user_config)), logger)
	}
	fn do_best_block_updated<NS: Deref, L: Deref>(
		&mut self, height: u32, highest_header_time: u32,
		chain_node_signer: Option<(ChainHash, &NS, &UserConfig)>, logger: &L
	) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
	where
		NS::Target: NodeSigner,
		L::Target: Logger
	{
		let mut timed_out_htlcs = Vec::new();
		// This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
		// forward an HTLC when our counterparty should almost certainly just fail it for expiring
		// ~now.
		let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
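		// E.g. with a grace period of 3 blocks, at height H any holding-cell HTLC whose
		// cltv_expiry is at or below H + 3 is failed back below instead of being forwarded.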
		self.context.holding_cell_htlc_updates.retain(|htlc_update| {
			match htlc_update {
				&HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
					if *cltv_expiry <= unforwarded_htlc_cltv_limit {
						timed_out_htlcs.push((source.clone(), payment_hash.clone()));
						false
					} else { true }
				},
				_ => true,
			}
		});

		self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);

		if let Some(channel_ready) = self.check_get_channel_ready(height) {
			let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
				self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
			} else { None };
			log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
			return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
		}
		if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
			self.context.channel_state.is_our_channel_ready() {
			let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
			if self.context.funding_tx_confirmation_height == 0 {
				// Note that check_get_channel_ready may reset funding_tx_confirmation_height to
				// zero if it has been reorged out, however in either case, our state flags
				// indicate we've already sent a channel_ready
				funding_tx_confirmations = 0;
			}

			// If we've sent channel_ready (or have both sent and received channel_ready), and
			// the funding transaction has become unconfirmed,
			// close the channel and hope we can get the latest state on chain (because presumably
			// the funding transaction is at least still in the mempool of most nodes).
			//
			// Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
			// 0-conf channel, but not doing so may lead to the
			// `ChannelManager::short_to_chan_info` map being inconsistent, so we currently have
			// to.
			if funding_tx_confirmations == 0 && self.context.funding_tx_confirmed_in.is_some() {
				let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
					self.context.minimum_depth.unwrap(), funding_tx_confirmations);
				return Err(ClosureReason::ProcessingError { err: err_reason });
			}
		} else if !self.context.is_outbound() && self.context.funding_tx_confirmed_in.is_none() &&
				height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
			log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id);
			// If funding_tx_confirmed_in is unset, the channel must not be active
			assert!(self.context.channel_state <= ChannelState::ChannelReady(ChannelReadyFlags::new()));
			assert!(!self.context.channel_state.is_our_channel_ready());
			return Err(ClosureReason::FundingTimedOut);
		}

		let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
			self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
		} else { None };
		Ok((None, timed_out_htlcs, announcement_sigs))
	}
	/// Indicates the funding transaction is no longer confirmed in the main chain. This may
	/// force-close the channel, but may also indicate a harmless reorganization of a block or two
	/// before the channel has reached channel_ready and we can just wait for more blocks.
	pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
		if self.context.funding_tx_confirmation_height != 0 {
			// We handle the funding disconnection by calling best_block_updated with a height one
			// below where our funding was connected, implying a reorg back to conf_height - 1.
			let reorg_height = self.context.funding_tx_confirmation_height - 1;
			// We use the time field to bump the current time we set on channel updates if it's
			// larger. If we don't know that time has moved forward, we can just set it to the last
			// time we saw and it will be ignored.
			let best_time = self.context.update_time_counter;
			match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&dyn NodeSigner, &UserConfig)>, logger) {
				Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
					assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?");
					assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
					assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?");
					Ok(())
				},
				Err(e) => Err(e)
			}
		} else {
			// We never learned about the funding confirmation anyway, just ignore
			Ok(())
		}
	}
	// Methods to get unprompted messages to send to the remote end (or where we already returned
	// something in the handler for the message that prompted this message):

	/// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
	/// announceable and available for use (have exchanged [`ChannelReady`] messages in both
	/// directions). Should be used for both broadcasted announcements and in response to an
	/// AnnouncementSignatures message from the remote peer.
	///
	/// Will only fail if we're not in a state where channel_announcement may be sent (including
	/// closing).
	///
	/// This will only return ChannelError::Ignore upon failure.
	///
	/// [`ChannelReady`]: crate::ln::msgs::ChannelReady
	fn get_channel_announcement<NS: Deref>(
		&self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
	) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
		if !self.context.config.announced_channel {
			return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
		}
		if !self.context.is_usable() {
			return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
		}

		let short_channel_id = self.context.get_short_channel_id()
			.ok_or(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel has not been confirmed yet".to_owned()))?;
		let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
			.map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
		let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
		let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();

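		// (Per BOLT 7, node_id_1 is the lexicographically lesser of the two serialized pubkeys, so
		// both peers independently derive the same ordering for the announcement below.)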
		let msg = msgs::UnsignedChannelAnnouncement {
			features: channelmanager::provided_channel_features(&user_config),
			chain_hash,
			short_channel_id,
			node_id_1: if were_node_one { node_id } else { counterparty_node_id },
			node_id_2: if were_node_one { counterparty_node_id } else { node_id },
			bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }),
			bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }),
			excess_data: Vec::new(),
		};

		Ok(msg)
	}
	fn get_announcement_sigs<NS: Deref, L: Deref>(
		&mut self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
		best_block_height: u32, logger: &L
	) -> Option<msgs::AnnouncementSignatures>
	where
		NS::Target: NodeSigner,
		L::Target: Logger
	{
		if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
			return None;
		}

		if !self.context.is_usable() {
			return None;
		}

		if self.context.channel_state.is_peer_disconnected() {
			log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
			return None;
		}

		if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent {
			return None;
		}

		log_trace!(logger, "Creating an announcement_signatures message for channel {}", &self.context.channel_id());
		let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
			Ok(a) => a,
			Err(e) => {
				log_trace!(logger, "{:?}", e);
				return None;
			}
		};
		let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) {
			Err(_) => {
				log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!");
				return None;
			},
			Ok(v) => v
		};
		match &self.context.holder_signer {
			ChannelSignerType::Ecdsa(ecdsa) => {
				let our_bitcoin_sig = match ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) {
					Err(_) => {
						log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!");
						return None;
					},
					Ok(v) => v
				};
				let short_channel_id = match self.context.get_short_channel_id() {
					Some(scid) => scid,
					None => return None,
				};

				self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;

				Some(msgs::AnnouncementSignatures {
					channel_id: self.context.channel_id(),
					short_channel_id,
					node_signature: our_node_sig,
					bitcoin_signature: our_bitcoin_sig,
				})
			},
			// TODO (taproot|arik)
			#[cfg(taproot)]
			_ => todo!()
		}
	}
	/// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are
	/// available.
	fn sign_channel_announcement<NS: Deref>(
		&self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement
	) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
		if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs {
			let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
				.map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?);
			let were_node_one = announcement.node_id_1 == our_node_key;

			let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement))
				.map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?;
			match &self.context.holder_signer {
				ChannelSignerType::Ecdsa(ecdsa) => {
					let our_bitcoin_sig = ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx)
						.map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
					Ok(msgs::ChannelAnnouncement {
						node_signature_1: if were_node_one { our_node_sig } else { their_node_sig },
						node_signature_2: if were_node_one { their_node_sig } else { our_node_sig },
						bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig },
						bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
						contents: announcement,
					})
				},
				// TODO (taproot|arik)
				#[cfg(taproot)]
				_ => todo!()
			}
		} else {
			Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
		}
	}
	/// Processes an incoming announcement_signatures message, providing a fully-signed
	/// channel_announcement message which we can broadcast and storing our counterparty's
	/// signatures for later reconstruction/rebroadcast of the channel_announcement.
	pub fn announcement_signatures<NS: Deref>(
		&mut self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32,
		msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
	) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
		let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;

		let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);

		if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
			return Err(ChannelError::Close(format!(
				"Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
				&announcement, self.context.get_counterparty_node_id())));
		}
		if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
			return Err(ChannelError::Close(format!(
				"Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
				&announcement, self.context.counterparty_funding_pubkey())));
		}

		self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature));
		if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
			return Err(ChannelError::Ignore(
				"Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned()));
		}

		self.sign_channel_announcement(node_signer, announcement)
	}
	/// Gets a signed channel_announcement for this channel, if we previously received an
	/// announcement_signatures from our counterparty.
	pub fn get_signed_channel_announcement<NS: Deref>(
		&self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32, user_config: &UserConfig
	) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
		if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
			return None;
		}
		let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
			Ok(res) => res,
			Err(_) => return None,
		};
		match self.sign_channel_announcement(node_signer, announcement) {
			Ok(res) => Some(res),
			Err(_) => None,
		}
	}
	/// May panic if called on a channel that wasn't immediately-previously
	/// self.remove_uncommitted_htlcs_and_mark_paused()'d
	pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
		assert!(self.context.channel_state.is_peer_disconnected());
		assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
		// Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
		// current to_remote balances. However, it no longer has any use, and thus is now simply
		// set to a dummy (but valid, as required by the spec) public key.
		// fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey"
		// branches, but we unwrap it below, so we arbitrarily select a dummy pubkey which is both
		// valid, and valid in fuzzing mode's arbitrary validity criteria:
		let mut pk = [2; 33]; pk[1] = 0xff;
		let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
		let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
			let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
			log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), &self.context.channel_id());
			remote_last_secret
		} else {
			log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", &self.context.channel_id());
			[0; 32]
		};
		self.mark_awaiting_response();
		msgs::ChannelReestablish {
			channel_id: self.context.channel_id(),
			// The protocol has two different commitment number concepts - the "commitment
			// transaction number", which starts from 0 and counts up, and the "revocation key
			// index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
			// commitment transaction numbers by the index which will be used to reveal the
			// revocation key for that commitment transaction, which means we have to convert them
			// to protocol-level commitment numbers here...
			//
			// next_local_commitment_number is the next commitment_signed number we expect to
			// receive (indicating if they need to resend one that we missed).
			next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
			// We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
			// receive, however we track it by the next commitment number for a remote transaction
			// (which is one further, as they always revoke previous commitment transaction, not
			// the one we send) so we have to decrement by 1. Note that if
			// cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
			// dropped this channel on disconnect as it hasn't yet reached AwaitingChannelReady so we can't
			// overflow here.
			next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
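			// (E.g. immediately after the initial funding_signed both tracked indexes are
			// INITIAL_COMMITMENT_NUMBER - 1, so we send next_local_commitment_number = 1 and
			// next_remote_commitment_number = 0, matching a channel with no updates yet.)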
			your_last_per_commitment_secret: remote_last_secret,
			my_current_per_commitment_point: dummy_pubkey,
			// TODO(dual_funding): If we've sent `commitment_signed` for an interactive transaction
			// construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
			// txid of that interactive transaction, else we MUST NOT set it.
			next_funding_txid: None,
		}
	}

	// Send stuff to our remote peers:

	/// Queues up an outbound HTLC to send by placing it in the holding cell. You should call
	/// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
	/// commitment update.
	///
	/// `Err`s will only be [`ChannelError::Ignore`].
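	///
	/// Example (illustrative sketch; `chan`, `source` and `onion` are hypothetical bindings):
	/// ```ignore
	/// // Queue a 10_000 msat HTLC; the update_add_htlc/commitment_signed pair is only generated
	/// // later, when `maybe_free_holding_cell_htlcs` is called.
	/// chan.queue_add_htlc(10_000, payment_hash, cltv_expiry, source, onion, None, None,
	///     &fee_estimator, &logger)?;
	/// ```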
	pub fn queue_add_htlc<F: Deref, L: Deref>(
		&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
		onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
		blinding_point: Option<PublicKey>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
	) -> Result<(), ChannelError>
	where F::Target: FeeEstimator, L::Target: Logger
	{
		self
			.send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true,
				skimmed_fee_msat, blinding_point, fee_estimator, logger)
			.map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
			.map_err(|err| {
				if let ChannelError::Ignore(_) = err { /* fine */ }
				else { debug_assert!(false, "Queueing cannot trigger channel failure"); }
				err
			})
	}
	/// Adds a pending outbound HTLC to this channel, note that you probably want
	/// [`Self::send_htlc_and_commit`] instead because you'll want both messages at once.
	///
	/// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on
	/// the wire:
	/// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
	///   wouldn't be able to determine what they actually ACK'ed if we have two sets of updates
	///   in flight.
	/// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as
	///   we may not yet have sent the previous commitment update messages and will need to
	///   regenerate them.
	///
	/// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods
	/// on this [`Channel`] if `force_holding_cell` is false.
	///
	/// `Err`s will only be [`ChannelError::Ignore`].
	fn send_htlc<F: Deref, L: Deref>(
		&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
		onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool,
		skimmed_fee_msat: Option<u64>, blinding_point: Option<PublicKey>,
		fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
	) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
	where F::Target: FeeEstimator, L::Target: Logger
	{
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
			self.context.channel_state.is_local_shutdown_sent() ||
			self.context.channel_state.is_remote_shutdown_sent()
		{
			return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
		}
		let channel_total_msat = self.context.channel_value_satoshis * 1000;
		if amount_msat > channel_total_msat {
			return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat)));
		}

		if amount_msat == 0 {
			return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
		}

		let available_balances = self.context.get_available_balances(fee_estimator);
		if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
			return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
				available_balances.next_outbound_htlc_minimum_msat)));
		}

		if amount_msat > available_balances.next_outbound_htlc_limit_msat {
			return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
				available_balances.next_outbound_htlc_limit_msat)));
		}

		if self.context.channel_state.is_peer_disconnected() {
			// Note that this should never really happen: being !is_live() on receipt of an
			// incoming HTLC for relay will result in us rejecting the HTLC, and we won't allow
			// the user to send directly into a !is_live() channel. However, if we
			// disconnected during the time the previous hop was doing the commitment dance we may
			// end up getting here after the forwarding delay. In any case, returning an
			// IgnoreError will get ChannelManager to do the right thing and fail backwards now.
			return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
		}

		let need_holding_cell = self.context.channel_state.should_force_holding_cell();
		log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}",
			payment_hash, amount_msat,
			if force_holding_cell { "into holding cell" }
			else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
			else { "to peer" });

		if need_holding_cell {
			force_holding_cell = true;
		}

		// Now update local state:
		if force_holding_cell {
			self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
				amount_msat,
				payment_hash,
				cltv_expiry,
				source,
				onion_routing_packet,
				skimmed_fee_msat,
				blinding_point,
			});
			return Ok(None);
		}

		self.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
			htlc_id: self.context.next_holder_htlc_id,
			amount_msat,
			payment_hash: payment_hash.clone(),
			cltv_expiry,
			state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
			source,
			blinding_point,
			skimmed_fee_msat,
		});

		let res = msgs::UpdateAddHTLC {
			channel_id: self.context.channel_id,
			htlc_id: self.context.next_holder_htlc_id,
			amount_msat,
			payment_hash,
			cltv_expiry,
			onion_routing_packet,
			skimmed_fee_msat,
			blinding_point,
		};
		self.context.next_holder_htlc_id += 1;

		Ok(Some(res))
	}
	fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
		log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
		// We can upgrade the status of some HTLCs that are waiting on a commitment, even if we
		// fail to generate this, we still are at least at a position where upgrading their status
		// is acceptable.
		for htlc in self.context.pending_inbound_htlcs.iter_mut() {
			let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state {
				Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
			} else { None };
			if let Some(state) = new_state {
				log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
				htlc.state = state;
			}
		}
		for htlc in self.context.pending_outbound_htlcs.iter_mut() {
			if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
				log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
				// Grab the preimage, if it exists, instead of cloning
				let mut reason = OutboundHTLCOutcome::Success(None);
				mem::swap(outcome, &mut reason);
				htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
			}
		}
		if let Some((feerate, update_state)) = self.context.pending_update_fee {
			if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
				debug_assert!(!self.context.is_outbound());
				log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
				self.context.feerate_per_kw = feerate;
				self.context.pending_update_fee = None;
			}
		}
		self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst;

		let (mut htlcs_ref, counterparty_commitment_tx) =
			self.build_commitment_no_state_update(logger);
		let counterparty_commitment_txid = counterparty_commitment_tx.trust().txid();
		let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
			htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();

		if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent {
			self.context.announcement_sigs_state = AnnouncementSigsState::Committed;
		}

		self.context.latest_monitor_update_id += 1;
		let monitor_update = ChannelMonitorUpdate {
			update_id: self.context.latest_monitor_update_id,
			updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
				commitment_txid: counterparty_commitment_txid,
				htlc_outputs: htlcs.clone(),
				commitment_number: self.context.cur_counterparty_commitment_transaction_number,
				their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap(),
				feerate_per_kw: Some(counterparty_commitment_tx.feerate_per_kw()),
				to_broadcaster_value_sat: Some(counterparty_commitment_tx.to_broadcaster_value_sat()),
				to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()),
			}],
		};
		self.context.channel_state.set_awaiting_remote_revoke();
		monitor_update
	}
	fn build_commitment_no_state_update<L: Deref>(&self, logger: &L)
	-> (Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>, CommitmentTransaction)
	where L::Target: Logger
	{
		let counterparty_keys = self.context.build_remote_transaction_keys();
		let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
		let counterparty_commitment_tx = commitment_stats.tx;

		#[cfg(any(test, fuzzing))]
		{
			if !self.context.is_outbound() {
				let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
				*self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
				if let Some(info) = projected_commit_tx_info {
					let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len();
					if info.total_pending_htlcs == total_pending_htlcs
						&& info.next_holder_htlc_id == self.context.next_holder_htlc_id
						&& info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
						&& info.feerate == self.context.feerate_per_kw {
							let actual_fee = commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.get_channel_type());
							assert_eq!(actual_fee, info.fee);
					}
				}
			}
		}

		(commitment_stats.htlcs_included, counterparty_commitment_tx)
	}
	/// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
	/// generation when we shouldn't change HTLC/channel state.
	fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
		// Get the fee tests from `build_commitment_no_state_update`
		#[cfg(any(test, fuzzing))]
		self.build_commitment_no_state_update(logger);

		let counterparty_keys = self.context.build_remote_transaction_keys();
		let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
		let counterparty_commitment_txid = commitment_stats.tx.trust().txid();

		match &self.context.holder_signer {
			ChannelSignerType::Ecdsa(ecdsa) => {
				let (signature, htlc_signatures);

				{
					let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
					for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
						htlcs.push(htlc);
					}

					let res = ecdsa.sign_counterparty_commitment(
						&commitment_stats.tx,
						commitment_stats.inbound_htlc_preimages,
						commitment_stats.outbound_htlc_preimages,
						&self.context.secp_ctx,
					).map_err(|_| ChannelError::Ignore("Failed to get signatures for new commitment_signed".to_owned()))?;
					signature = res.0;
					htlc_signatures = res.1;

					log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
						encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
						&counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
						log_bytes!(signature.serialize_compact()[..]), &self.context.channel_id());

					for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
						log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
							encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, &self.context.channel_type, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
							encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &counterparty_keys)),
							log_bytes!(counterparty_keys.broadcaster_htlc_key.to_public_key().serialize()),
							log_bytes!(htlc_sig.serialize_compact()[..]), &self.context.channel_id());
					}
				}

				Ok((msgs::CommitmentSigned {
					channel_id: self.context.channel_id,
					signature,
					htlc_signatures,
					#[cfg(taproot)]
					partial_signature_with_nonce: None,
				}, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
			},
			// TODO (taproot|arik)
			#[cfg(taproot)]
			_ => todo!()
		}
	}
	/// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
	/// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
	///
	/// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
	/// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
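	///
	/// A minimal sketch of the call flow (illustrative only, not compiled; `chan`,
	/// `fee_estimator`, `logger`, and the HTLC parameters are assumed to exist in the
	/// caller's scope):
	///
	/// ```ignore
	/// match chan.send_htlc_and_commit(
	/// 	amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet,
	/// 	None, &fee_estimator, &logger,
	/// ) {
	/// 	// The HTLC was sent and a commitment was built; persist the monitor update.
	/// 	Ok(Some(monitor_update)) => { /* hand the update to the chain::Watch */ },
	/// 	// The HTLC was queued in the holding cell; nothing to persist yet.
	/// 	Ok(None) => {},
	/// 	// Only `Ignore` errors are possible here; the channel remains usable.
	/// 	Err(_e) => {},
	/// }
	/// ```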
	pub fn send_htlc_and_commit<F: Deref, L: Deref>(
		&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32,
		source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
		fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
	) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
	where F::Target: FeeEstimator, L::Target: Logger
	{
		let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
			onion_routing_packet, false, skimmed_fee_msat, None, fee_estimator, logger);
		if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
		match send_res? {
			Some(_) => {
				let monitor_update = self.build_commitment_no_status_check(logger);
				self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
				Ok(self.push_ret_blockable_mon_update(monitor_update))
			},
			None => Ok(None)
		}
	}
	/// Applies the `ChannelUpdate` and returns a boolean indicating whether a change actually
	/// happened.
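	///
	/// Illustrative only (not compiled; `chan` and a received `msg: msgs::ChannelUpdate` are
	/// assumed):
	///
	/// ```ignore
	/// if chan.channel_update(&msg)? {
	/// 	// The counterparty's forwarding parameters (fees/CLTV delta) changed.
	/// }
	/// ```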
	pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<bool, ChannelError> {
		let new_forwarding_info = Some(CounterpartyForwardingInfo {
			fee_base_msat: msg.contents.fee_base_msat,
			fee_proportional_millionths: msg.contents.fee_proportional_millionths,
			cltv_expiry_delta: msg.contents.cltv_expiry_delta
		});
		let did_change = self.context.counterparty_forwarding_info != new_forwarding_info;
		if did_change {
			self.context.counterparty_forwarding_info = new_forwarding_info;
		}

		Ok(did_change)
	}
	/// Begins the shutdown process, getting a message for the remote peer and returning all
	/// holding cell HTLCs for payment failure.
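	///
	/// Sketch of a cooperative close initiated by the holder (illustrative, not compiled;
	/// `chan`, `signer_provider`, and `their_features` are assumed):
	///
	/// ```ignore
	/// let (shutdown_msg, monitor_update_opt, dropped_htlcs) =
	/// 	chan.get_shutdown(&signer_provider, &their_features, None, None)?;
	/// // Send `shutdown_msg` to the peer, persist any monitor update, and fail the
	/// // `dropped_htlcs` back to their sources.
	/// ```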
	pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures,
		target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
	-> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
	{
		for htlc in self.context.pending_outbound_htlcs.iter() {
			if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
				return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
			}
		}
		if self.context.channel_state.is_local_shutdown_sent() {
			return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
		}
		else if self.context.channel_state.is_remote_shutdown_sent() {
			return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
		}
		if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
			return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
		}
		assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
		if self.context.channel_state.is_peer_disconnected() || self.context.channel_state.is_monitor_update_in_progress() {
			return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
		}

		let update_shutdown_script = match self.context.shutdown_scriptpubkey {
			Some(_) => false,
			None => {
				// use override shutdown script if provided
				let shutdown_scriptpubkey = match override_shutdown_script {
					Some(script) => script,
					None => {
						// otherwise, use the shutdown scriptpubkey provided by the signer
						match signer_provider.get_shutdown_scriptpubkey() {
							Ok(scriptpubkey) => scriptpubkey,
							Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
						}
					},
				};
				if !shutdown_scriptpubkey.is_compatible(their_features) {
					return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
				}
				self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
				true
			},
		};

		// From here on out, we may not fail!
		self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
		self.context.channel_state.set_local_shutdown_sent();
		self.context.update_time_counter += 1;

		let monitor_update = if update_shutdown_script {
			self.context.latest_monitor_update_id += 1;
			let monitor_update = ChannelMonitorUpdate {
				update_id: self.context.latest_monitor_update_id,
				updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
					scriptpubkey: self.get_closing_scriptpubkey(),
				}],
			};
			self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
			self.push_ret_blockable_mon_update(monitor_update)
		} else { None };
		let shutdown = msgs::Shutdown {
			channel_id: self.context.channel_id,
			scriptpubkey: self.get_closing_scriptpubkey(),
		};

		// Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
		// our shutdown until we've committed all of the pending changes.
		self.context.holding_cell_update_fee = None;
		let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
		self.context.holding_cell_htlc_updates.retain(|htlc_update| {
			match htlc_update {
				&HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
					dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
					false
				},
				_ => true
			}
		});

		debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
			"we can't both complete shutdown and return a monitor update");

		Ok((shutdown, monitor_update, dropped_outbound_htlcs))
	}
	pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
		self.context.holding_cell_htlc_updates.iter()
			.flat_map(|htlc_update| {
				match htlc_update {
					HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
						=> Some((source, payment_hash)),
					_ => None,
				}
			})
			.chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
	}
}
/// A not-yet-funded outbound (from holder) channel using V1 channel establishment.
pub(super) struct OutboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
	pub context: ChannelContext<SP>,
	pub unfunded_context: UnfundedChannelContext,
}
impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
	pub fn new<ES: Deref, F: Deref>(
		fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
		counterparty_node_id: PublicKey, their_features: &InitFeatures, channel_value_satoshis: u64,
		push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
		outbound_scid_alias: u64, temporary_channel_id: Option<ChannelId>
	) -> Result<OutboundV1Channel<SP>, APIError>
	where ES::Target: EntropySource,
	      F::Target: FeeEstimator
	{
		let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
		let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
		let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
		let pubkeys = holder_signer.pubkeys().clone();

		if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
			return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
		}
		if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
			return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
		}
		let channel_value_msat = channel_value_satoshis * 1000;
		if push_msat > channel_value_msat {
			return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
		}
		if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
			return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk", holder_selected_contest_delay)});
		}
		let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
		if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
			// Protocol level safety check in place, although it should never happen because
			// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
			return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below implementation limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
		}

		let channel_type = Self::get_initial_channel_type(&config, their_features);
		debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));

		let (commitment_conf_target, anchor_outputs_value_msat) = if channel_type.supports_anchors_zero_fee_htlc_tx() {
			(ConfirmationTarget::AnchorChannelFee, ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000)
		} else {
			(ConfirmationTarget::NonAnchorChannelFee, 0)
		};
		let commitment_feerate = fee_estimator.bounded_sat_per_1000_weight(commitment_conf_target);

		let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
		let commitment_tx_fee = commit_tx_fee_msat(commitment_feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
		if value_to_self_msat.saturating_sub(anchor_outputs_value_msat) < commitment_tx_fee {
			return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
		}

		let mut secp_ctx = Secp256k1::new();
		secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());

		let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
			match signer_provider.get_shutdown_scriptpubkey() {
				Ok(scriptpubkey) => Some(scriptpubkey),
				Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
			}
		} else { None };

		if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
			if !shutdown_scriptpubkey.is_compatible(&their_features) {
				return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
			}
		}

		let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
			Ok(script) => script,
			Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
		};

		let temporary_channel_id = temporary_channel_id.unwrap_or_else(|| ChannelId::temporary_from_entropy_source(entropy_source));

		Ok(Self {
			context: ChannelContext {
				user_id,

				config: LegacyChannelConfig {
					options: config.channel_config.clone(),
					announced_channel: config.channel_handshake_config.announced_channel,
					commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
				},

				prev_config: None,

				inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),

				channel_id: temporary_channel_id,
				temporary_channel_id: Some(temporary_channel_id),
				channel_state: ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT),
				announcement_sigs_state: AnnouncementSigsState::NotSent,
				secp_ctx,
				channel_value_satoshis,

				latest_monitor_update_id: 0,

				holder_signer: ChannelSignerType::Ecdsa(holder_signer),
				shutdown_scriptpubkey,
				destination_script,

				cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
				cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
				value_to_self_msat,

				pending_inbound_htlcs: Vec::new(),
				pending_outbound_htlcs: Vec::new(),
				holding_cell_htlc_updates: Vec::new(),
				pending_update_fee: None,
				holding_cell_update_fee: None,
				next_holder_htlc_id: 0,
				next_counterparty_htlc_id: 0,
				update_time_counter: 1,

				resend_order: RAACommitmentOrder::CommitmentFirst,

				monitor_pending_channel_ready: false,
				monitor_pending_revoke_and_ack: false,
				monitor_pending_commitment_signed: false,
				monitor_pending_forwards: Vec::new(),
				monitor_pending_failures: Vec::new(),
				monitor_pending_finalized_fulfills: Vec::new(),

				signer_pending_commitment_update: false,
				signer_pending_funding: false,

				#[cfg(debug_assertions)]
				holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
				#[cfg(debug_assertions)]
				counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),

				last_sent_closing_fee: None,
				pending_counterparty_closing_signed: None,
				expecting_peer_commitment_signed: false,
				closing_fee_limits: None,
				target_closing_feerate_sats_per_kw: None,

				funding_tx_confirmed_in: None,
				funding_tx_confirmation_height: 0,
				short_channel_id: None,
				channel_creation_height: current_chain_height,

				feerate_per_kw: commitment_feerate,
				counterparty_dust_limit_satoshis: 0,
				holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
				counterparty_max_htlc_value_in_flight_msat: 0,
				holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
				counterparty_selected_channel_reserve_satoshis: None, // Filled in upon receiving accept_channel
				holder_selected_channel_reserve_satoshis,
				counterparty_htlc_minimum_msat: 0,
				holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
				counterparty_max_accepted_htlcs: 0,
				holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
				minimum_depth: None, // Filled in upon receiving accept_channel

				counterparty_forwarding_info: None,

				channel_transaction_parameters: ChannelTransactionParameters {
					holder_pubkeys: pubkeys,
					holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
					is_outbound_from_holder: true,
					counterparty_parameters: None,
					funding_outpoint: None,
					channel_type_features: channel_type.clone()
				},
				funding_transaction: None,
				is_batch_funding: None,

				counterparty_cur_commitment_point: None,
				counterparty_prev_commitment_point: None,
				counterparty_node_id,

				counterparty_shutdown_scriptpubkey: None,

				commitment_secrets: CounterpartyCommitmentSecrets::new(),

				channel_update_status: ChannelUpdateStatus::Enabled,
				closing_signed_in_flight: false,

				announcement_sigs: None,

				#[cfg(any(test, fuzzing))]
				next_local_commitment_tx_fee_info_cached: Mutex::new(None),
				#[cfg(any(test, fuzzing))]
				next_remote_commitment_tx_fee_info_cached: Mutex::new(None),

				workaround_lnd_bug_4006: None,
				sent_message_awaiting_response: None,

				latest_inbound_scid_alias: None,
				outbound_scid_alias,

				channel_pending_event_emitted: false,
				channel_ready_event_emitted: false,

				#[cfg(any(test, fuzzing))]
				historical_inbound_htlc_fulfills: HashSet::new(),

				channel_type,
				channel_keys_id,

				blocked_monitor_updates: Vec::new(),
			},
			unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
		})
	}
	/// Only allowed after [`ChannelContext::channel_transaction_parameters`] is set.
	fn get_funding_created_msg<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
		let counterparty_keys = self.context.build_remote_transaction_keys();
		let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
		let signature = match &self.context.holder_signer {
			// TODO (taproot|arik): move match into calling method for Taproot
			ChannelSignerType::Ecdsa(ecdsa) => {
				ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.context.secp_ctx)
					.map(|(sig, _)| sig).ok()?
			},
			// TODO (taproot|arik)
			#[cfg(taproot)]
			_ => todo!()
		};

		if self.context.signer_pending_funding {
			log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding");
			self.context.signer_pending_funding = false;
		}

		Some(msgs::FundingCreated {
			temporary_channel_id: self.context.temporary_channel_id.unwrap(),
			funding_txid: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().txid,
			funding_output_index: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().index,
			signature,
			#[cfg(taproot)]
			partial_signature_with_nonce: None,
			#[cfg(taproot)]
			next_local_nonce: None,
		})
	}
	/// Updates channel state with knowledge of the funding transaction's txid/index, and generates
	/// a funding_created message for the remote peer.
	/// Panics if called at some time other than immediately after initial handshake, if called twice,
	/// or if called on an inbound channel.
	/// Note that channel_id changes during this call!
	/// Do NOT broadcast the funding transaction until after a successful funding_signed call!
	/// If an Err is returned, it is a ChannelError::Close.
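	///
	/// Sketch of the outbound V1 funding flow (illustrative, not compiled; `chan`,
	/// `funding_tx`, `funding_txo`, and `logger` are assumed):
	///
	/// ```ignore
	/// // After the peer's accept_channel has been processed:
	/// let funding_created = chan.get_funding_created(funding_tx, funding_txo, false, &logger)?;
	/// // Send `funding_created` (if `Some`) to the peer, then wait for their funding_signed
	/// // before broadcasting the funding transaction.
	/// ```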
	pub fn get_funding_created<L: Deref>(&mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
	-> Result<Option<msgs::FundingCreated>, (Self, ChannelError)> where L::Target: Logger {
		if !self.context.is_outbound() {
			panic!("Tried to create outbound funding_created message on an inbound channel!");
		}
		if !matches!(
			self.context.channel_state, ChannelState::NegotiatingFunding(flags)
			if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
		) {
			panic!("Tried to get a funding_created message at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
		}
		if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
				self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
				self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
			panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
		}

		self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
		self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);

		// Now that we're past error-generating stuff, update our local state:

		self.context.channel_state = ChannelState::FundingNegotiated;
		self.context.channel_id = funding_txo.to_channel_id();

		// If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
		// We can skip this if it is a zero-conf channel.
		if funding_transaction.is_coin_base() &&
				self.context.minimum_depth.unwrap_or(0) > 0 &&
				self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
			self.context.minimum_depth = Some(COINBASE_MATURITY);
		}

		self.context.funding_transaction = Some(funding_transaction);
		self.context.is_batch_funding = Some(()).filter(|_| is_batch_funding);

		let funding_created = self.get_funding_created_msg(logger);
		if funding_created.is_none() {
			if !self.context.signer_pending_funding {
				log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
				self.context.signer_pending_funding = true;
			}
		}

		Ok(funding_created)
	}
	fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
		// The default channel type (ie the first one we try) depends on whether the channel is
		// public - if it is, we just go with `only_static_remotekey` as it's the only option
		// available. If it's private, we first try `scid_privacy` as it provides better privacy
		// with no other changes, and fall back to `only_static_remotekey`.
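		//
		// For example (an illustrative combination, not taken from a specific test): a private
		// channel where our config sets `negotiate_scid_privacy` and
		// `negotiate_anchors_zero_fee_htlc_tx`, against a peer advertising both features,
		// yields `static_remote_key` plus the required `scid_privacy` and required
		// `anchors_zero_fee_htlc_tx` bits.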
		let mut ret = ChannelTypeFeatures::only_static_remote_key();
		if !config.channel_handshake_config.announced_channel &&
			config.channel_handshake_config.negotiate_scid_privacy &&
			their_features.supports_scid_privacy() {
			ret.set_scid_privacy_required();
		}

		// Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
		// set it now. If they don't understand it, we'll fall back to our default of
		// `only_static_remotekey`.
		if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
			their_features.supports_anchors_zero_fee_htlc_tx() {
			ret.set_anchors_zero_fee_htlc_tx_required();
		}

		ret
	}
	/// If we receive an error message, it may only be a rejection of the channel type we tried,
	/// not of our ability to open any channel at all. Thus, on error, we should first call this
	/// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
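	///
	/// Illustrative retry loop (not compiled; `chan`, `chain_hash`, and `fee_estimator` are
	/// assumed, and the peer messaging is elided):
	///
	/// ```ignore
	/// // The peer sent us an error in response to our open_channel:
	/// match chan.maybe_handle_error_without_close(chain_hash, &fee_estimator) {
	/// 	Ok(open_channel_msg) => { /* resend with a downgraded channel type */ },
	/// 	Err(()) => { /* no simpler type left to try; fail the channel */ },
	/// }
	/// ```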
	pub(crate) fn maybe_handle_error_without_close<F: Deref>(
		&mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F>
	) -> Result<msgs::OpenChannel, ()>
	where
		F::Target: FeeEstimator
	{
		if !self.context.is_outbound() ||
			!matches!(
				self.context.channel_state, ChannelState::NegotiatingFunding(flags)
				if flags == NegotiatingFundingFlags::OUR_INIT_SENT
			)
		{
			return Err(());
		}
		if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() {
			// We've exhausted our options
			return Err(());
		}
		// We support opening a few different types of channels. Try removing our additional
		// features one by one until we've either arrived at our default or the counterparty has
		// accepted something.
		//
		// Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
		// counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
		// checks whether the counterparty supports every feature, this would only happen if the
		// counterparty is advertising the feature, but rejecting channels proposing the feature for
		// some reason.
		if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
			self.context.channel_type.clear_anchors_zero_fee_htlc_tx();
			self.context.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
			assert!(!self.context.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
		} else if self.context.channel_type.supports_scid_privacy() {
			self.context.channel_type.clear_scid_privacy();
		} else {
			self.context.channel_type = ChannelTypeFeatures::only_static_remote_key();
		}
		self.context.channel_transaction_parameters.channel_type_features = self.context.channel_type.clone();
		Ok(self.get_open_channel(chain_hash))
	}
	pub fn get_open_channel(&self, chain_hash: ChainHash) -> msgs::OpenChannel {
		if !self.context.is_outbound() {
			panic!("Tried to open a channel for an inbound channel?");
		}
		if self.context.have_received_message() {
			panic!("Cannot generate an open_channel after we've moved forward");
		}

		if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
			panic!("Tried to send an open_channel for a channel that has already advanced");
		}

		let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
		let keys = self.context.get_holder_pubkeys();

		msgs::OpenChannel {
			chain_hash,
			temporary_channel_id: self.context.channel_id,
			funding_satoshis: self.context.channel_value_satoshis,
			push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat,
			dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
			max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
			channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
			htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
			feerate_per_kw: self.context.feerate_per_kw as u32,
			to_self_delay: self.context.get_holder_selected_contest_delay(),
			max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
			funding_pubkey: keys.funding_pubkey,
			revocation_basepoint: keys.revocation_basepoint.to_public_key(),
			payment_point: keys.payment_point,
			delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
			htlc_basepoint: keys.htlc_basepoint.to_public_key(),
			first_per_commitment_point,
			channel_flags: if self.context.config.announced_channel {1} else {0},
			shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
				Some(script) => script.clone().into_inner(),
				None => Builder::new().into_script(),
			}),
			channel_type: Some(self.context.channel_type.clone()),
		}
	}
	pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
		let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };

		// Check sanity of message fields:
		if !self.context.is_outbound() {
			return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
		}
		if !matches!(self.context.channel_state, ChannelState::NegotiatingFunding(flags) if flags == NegotiatingFundingFlags::OUR_INIT_SENT) {
			return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
		}
		if msg.dust_limit_satoshis > 21000000 * 100000000 {
			return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.dust_limit_satoshis)));
		}
		if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
			return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
		}
		if msg.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
			return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
		}
		if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
			return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
				msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
		}
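		// Worked example of the check above (illustrative numbers): with a 1_000_000 sat
		// channel and a 10_000 sat holder-selected reserve, the reserve the peer requires us
		// to keep must not exceed 1_000_000 - 10_000 = 990_000 sats, or the two reserves
		// could not both be satisfied at once.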
		let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
		if msg.htlc_minimum_msat >= full_channel_value_msat {
			return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
		}
		let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
		if msg.to_self_delay > max_delay_acceptable {
			return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.to_self_delay)));
		}
		if msg.max_accepted_htlcs < 1 {
			return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
		}
		if msg.max_accepted_htlcs > MAX_HTLCS {
			return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
		}

		// Now check against optional parameters as set by config...
		if msg.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
			return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
		}
		if msg.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
			return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
		}
		if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
			return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
		}
		if msg.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
			return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
		}
		if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
			return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
		}
		if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
			return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
		}
		if msg.minimum_depth > peer_limits.max_minimum_depth {
			return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.minimum_depth)));
		}

		if let Some(ty) = &msg.channel_type {
			if *ty != self.context.channel_type {
				return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
			}
		} else if their_features.supports_channel_type() {
			// Assume they've accepted the channel type as they said they understand it.
		} else {
			let channel_type = ChannelTypeFeatures::from_init(&their_features);
			if channel_type != ChannelTypeFeatures::only_static_remote_key() {
				return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
			}
			self.context.channel_type = channel_type.clone();
			self.context.channel_transaction_parameters.channel_type_features = channel_type;
		}

		let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
			match &msg.shutdown_scriptpubkey {
				&Some(ref script) => {
					// Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
					if script.len() == 0 {
						None
					} else {
						if !script::is_bolt2_compliant(&script, their_features) {
							return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
						}
						Some(script.clone())
					}
				},
				// Peer is signaling upfront shutdown but doesn't opt out with the correct mechanism (a.k.a. a 0-length script). Peer looks buggy, so we fail the channel
				&None => {
					return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
				}
			}
		} else { None };

		self.context.counterparty_dust_limit_satoshis = msg.dust_limit_satoshis;
		self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
		self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
		self.context.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat;
		self.context.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs;

		if peer_limits.trust_own_funding_0conf {
			self.context.minimum_depth = Some(msg.minimum_depth);
		} else {
			self.context.minimum_depth = Some(cmp::max(1, msg.minimum_depth));
		}

		let counterparty_pubkeys = ChannelPublicKeys {
			funding_pubkey: msg.funding_pubkey,
			revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
			payment_point: msg.payment_point,
			delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
			htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
		};

		self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
			selected_contest_delay: msg.to_self_delay,
			pubkeys: counterparty_pubkeys,
		});

		self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point);
		self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;

		self.context.channel_state = ChannelState::NegotiatingFunding(
			NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
		);
		self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.

		Ok(())
	}
	/// Handles a funding_signed message from the remote end.
	/// If this call is successful, broadcast the funding transaction (and not before!)
	pub fn funding_signed<L: Deref>(
		mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
	) -> Result<(Channel<SP>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (OutboundV1Channel<SP>, ChannelError)>
	where
		L::Target: Logger
	{
		if !self.context.is_outbound() {
			return Err((self, ChannelError::Close("Received funding_signed for an inbound channel?".to_owned())));
		}
		if !matches!(self.context.channel_state, ChannelState::FundingNegotiated) {
			return Err((self, ChannelError::Close("Received funding_signed in strange state!".to_owned())));
		}
		if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
				self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
				self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
			panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
		}

		let funding_script = self.context.get_funding_redeemscript();

		let counterparty_keys = self.context.build_remote_transaction_keys();
		let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
		let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
		let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();

		log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
			&self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));

		let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
		let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
		{
			let trusted_tx = initial_commitment_tx.trust();
			let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
			let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
			// They sign our commitment transaction, allowing us to broadcast the tx if we wish.
			if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
				return Err((self, ChannelError::Close("Invalid funding_signed signature from peer".to_owned())));
			}
		}

		let holder_commitment_tx = HolderCommitmentTransaction::new(
			initial_commitment_tx,
			msg.signature,
			Vec::new(),
			&self.context.get_holder_pubkeys().funding_pubkey,
			self.context.counterparty_funding_pubkey()
		);

		let validated =
			self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new());
		if validated.is_err() {
			return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
		}

		let funding_redeemscript = self.context.get_funding_redeemscript();
		let funding_txo = self.context.get_funding_txo().unwrap();
		let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
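		// The obscure factor below is, per BOLT 3, the lower 48 bits of
		// SHA256(open_channel payment_basepoint || accept_channel payment_basepoint); it is
		// XORed with the commitment number encoded in each commitment transaction's
		// locktime/sequence fields so that observers can't read the commitment count off the
		// chain.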
		let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
		let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
		let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
		monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
		let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
			shutdown_script, self.context.get_holder_selected_contest_delay(),
			&self.context.destination_script, (funding_txo, funding_txo_script),
			&self.context.channel_transaction_parameters,
			funding_redeemscript.clone(), self.context.channel_value_satoshis,
			obscure_factor,
			holder_commitment_tx, best_block, self.context.counterparty_node_id);
		channel_monitor.provide_initial_counterparty_commitment_tx(
			counterparty_initial_bitcoin_tx.txid, Vec::new(),
			self.context.cur_counterparty_commitment_transaction_number,
			self.context.counterparty_cur_commitment_point.unwrap(),
			counterparty_initial_commitment_tx.feerate_per_kw(),
			counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
			counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);

		assert!(!self.context.channel_state.is_monitor_update_in_progress()); // We have not had any monitors yet to fail an update against!
		if self.context.is_batch_funding() {
			self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH);
		} else {
			self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
		}
		self.context.cur_holder_commitment_transaction_number -= 1;
		self.context.cur_counterparty_commitment_transaction_number -= 1;

		log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());

		let mut channel = Channel { context: self.context };

		let need_channel_ready = channel.check_get_channel_ready(0).is_some();
		channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
		Ok((channel, channel_monitor))
	}
	/// Indicates that the signer may have some signatures for us, so we should retry if we're
	/// stuck.
	pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
		if self.context.signer_pending_funding && self.context.is_outbound() {
			log_trace!(logger, "Signer unblocked a funding_created");
			self.get_funding_created_msg(logger)
		} else { None }
	}
}
/// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
pub(super) struct InboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
	pub context: ChannelContext<SP>,
	pub unfunded_context: UnfundedChannelContext,
}
impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
	/// Creates a new channel from a remote side's request for one.
	/// Assumes chain_hash has already been checked and corresponds with what we expect!
	pub fn new<ES: Deref, F: Deref, L: Deref>(
		fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
		counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
		their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
		current_chain_height: u32, logger: &L, is_0conf: bool,
	) -> Result<InboundV1Channel<SP>, ChannelError>
	where ES::Target: EntropySource,
	      F::Target: FeeEstimator,
	      L::Target: Logger,
	{
		let logger = WithContext::from(logger, Some(counterparty_node_id), Some(msg.temporary_channel_id));
		let announced_channel = (msg.channel_flags & 1) == 1;

		// First check the channel type is known, failing before we do anything else if we don't
		// support this channel type.
		let channel_type = if let Some(channel_type) = &msg.channel_type {
			if channel_type.supports_any_optional_bits() {
				return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
			}

			// We only support the channel types defined by the `ChannelManager` in
			// `provided_channel_type_features`. The channel type must always support
			// `static_remote_key`.
			if !channel_type.requires_static_remote_key() {
				return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
			}
			// Make sure we support all of the features behind the channel type.
			if !channel_type.is_subset(our_supported_features) {
				return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
			}
			if channel_type.requires_scid_privacy() && announced_channel {
				return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
			}
			channel_type.clone()
		} else {
			let channel_type = ChannelTypeFeatures::from_init(&their_features);
			if channel_type != ChannelTypeFeatures::only_static_remote_key() {
				return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
			}
			channel_type
		};

		let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id);
		let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id);
		let pubkeys = holder_signer.pubkeys().clone();
		let counterparty_pubkeys = ChannelPublicKeys {
			funding_pubkey: msg.funding_pubkey,
			revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
			payment_point: msg.payment_point,
			delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
			htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
		};

		if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
			return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
		}

		// Check sanity of message fields:
		if msg.funding_satoshis > config.channel_handshake_limits.max_funding_satoshis {
			return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.channel_handshake_limits.max_funding_satoshis, msg.funding_satoshis)));
		}
		if msg.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
			return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.funding_satoshis)));
		}
		if msg.channel_reserve_satoshis > msg.funding_satoshis {
			return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.funding_satoshis)));
		}
		let full_channel_value_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000;
		if msg.push_msat > full_channel_value_msat {
			return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg.push_msat, full_channel_value_msat)));
		}
		if msg.dust_limit_satoshis > msg.funding_satoshis {
			return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than funding_satoshis {}. Peer never wants payout outputs?", msg.dust_limit_satoshis, msg.funding_satoshis)));
		}
		if msg.htlc_minimum_msat >= full_channel_value_msat {
			return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
		}
		Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, msg.feerate_per_kw, None, &&logger)?;

		let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
		if msg.to_self_delay > max_counterparty_selected_contest_delay {
			return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, msg.to_self_delay)));
		}
		if msg.max_accepted_htlcs < 1 {
			return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
		}
		if msg.max_accepted_htlcs > MAX_HTLCS {
			return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
		}

		// Now check against optional parameters as set by config...
		if msg.funding_satoshis < config.channel_handshake_limits.min_funding_satoshis {
			return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", msg.funding_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
		}
		if msg.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
			return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
		}
		if msg.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
			return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
		}
		if msg.channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
			return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
		}
		if msg.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
			return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
		}
		if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
			return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
		}
		if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
			return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
		}

		// Convert things into internal flags and prep our state:

		if config.channel_handshake_limits.force_announced_channel_preference {
			if config.channel_handshake_config.announced_channel != announced_channel {
				return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
			}
		}

		let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis, config);
		if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
			// Protocol level safety check in place, although it should never happen because
			// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
			return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
		}
		if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
			return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg.push_msat)));
		}
		if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
			log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
				msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
		}
		if holder_selected_channel_reserve_satoshis < msg.dust_limit_satoshis {
			return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
		}

		// check if the funder's amount for the initial commitment tx is sufficient
		// for full fee payment plus a few HTLCs to ensure the channel will be useful.
		let anchor_outputs_value = if channel_type.supports_anchors_zero_fee_htlc_tx() {
			ANCHOR_OUTPUT_VALUE_SATOSHI * 2
		} else {
			0
		};

		let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat;
		let commitment_tx_fee = commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000;
		if (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value) < commitment_tx_fee {
			return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee)));
		}
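		// Worked example of the check above (illustrative numbers; assumes the BOLT 3
		// non-anchor weights of 724 + 172 per HTLC and that `MIN_AFFORDABLE_HTLC_COUNT` is 4):
		// with funding_satoshis = 20_000, push_msat = 0, and feerate_per_kw = 2500, the fee is
		// 2500 * (724 + 4 * 172) / 1000 = 3530 sats, so a 20_000 sat funder comfortably
		// passes, while a 3_000 sat funder would not.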
		let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee - anchor_outputs_value;
		// While it's reasonable for us to not meet the channel reserve initially (if they don't
		// want to push much to us), our counterparty should always have more than our reserve.
		if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
			return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
		}

		let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
			match &msg.shutdown_scriptpubkey {
				&Some(ref script) => {
					// Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
					if script.len() == 0 {
						None
					} else {
						if !script::is_bolt2_compliant(&script, their_features) {
							return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
						}
						Some(script.clone())
					}
				},
				// Peer is signaling upfront shutdown but doesn't opt out with the correct mechanism (a.k.a. a 0-length script). Peer looks buggy, so we fail the channel
				&None => {
					return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
				}
			}
		} else { None };

		let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
			match signer_provider.get_shutdown_scriptpubkey() {
				Ok(scriptpubkey) => Some(scriptpubkey),
				Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
			}
		} else { None };

		if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
			if !shutdown_scriptpubkey.is_compatible(&their_features) {
				return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
			}
		}

		let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
			Ok(script) => script,
			Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
		};

		let mut secp_ctx = Secp256k1::new();
		secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());

		let minimum_depth = if is_0conf {
			Some(0)
		} else {
			Some(cmp::max(config.channel_handshake_config.minimum_depth, 1))
		};

		let chan = Self {
			context: ChannelContext {
				user_id,

				config: LegacyChannelConfig {
					options: config.channel_config.clone(),
					announced_channel,
					commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
				},

				prev_config: None,

				inbound_handshake_limits_override: None,

				temporary_channel_id: Some(msg.temporary_channel_id),
				channel_id: msg.temporary_channel_id,
				channel_state: ChannelState::NegotiatingFunding(
					NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
				),
				announcement_sigs_state: AnnouncementSigsState::NotSent,
				secp_ctx,

				latest_monitor_update_id: 0,

				holder_signer: ChannelSignerType::Ecdsa(holder_signer),
				shutdown_scriptpubkey,
				destination_script,

				cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
				cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
				value_to_self_msat: msg.push_msat,

				pending_inbound_htlcs: Vec::new(),
				pending_outbound_htlcs: Vec::new(),
				holding_cell_htlc_updates: Vec::new(),
				pending_update_fee: None,
				holding_cell_update_fee: None,
				next_holder_htlc_id: 0,
				next_counterparty_htlc_id: 0,
				update_time_counter: 1,

				resend_order: RAACommitmentOrder::CommitmentFirst,

				monitor_pending_channel_ready: false,
				monitor_pending_revoke_and_ack: false,
				monitor_pending_commitment_signed: false,
				monitor_pending_forwards: Vec::new(),
				monitor_pending_failures: Vec::new(),
				monitor_pending_finalized_fulfills: Vec::new(),

				signer_pending_commitment_update: false,
				signer_pending_funding: false,

				#[cfg(debug_assertions)]
				holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
				#[cfg(debug_assertions)]
				counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),

				last_sent_closing_fee: None,
				pending_counterparty_closing_signed: None,
				expecting_peer_commitment_signed: false,
				closing_fee_limits: None,
				target_closing_feerate_sats_per_kw: None,

				funding_tx_confirmed_in: None,
				funding_tx_confirmation_height: 0,
				short_channel_id: None,
				channel_creation_height: current_chain_height,

				feerate_per_kw: msg.feerate_per_kw,
				channel_value_satoshis: msg.funding_satoshis,
				counterparty_dust_limit_satoshis: msg.dust_limit_satoshis,
				holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
				counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000),
				holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config),
				counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis),
				holder_selected_channel_reserve_satoshis,
				counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
				holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
				counterparty_max_accepted_htlcs: msg.max_accepted_htlcs,
				holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
				minimum_depth,

				counterparty_forwarding_info: None,

				channel_transaction_parameters: ChannelTransactionParameters {
					holder_pubkeys: pubkeys,
					holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
					is_outbound_from_holder: false,
					counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
						selected_contest_delay: msg.to_self_delay,
						pubkeys: counterparty_pubkeys,
					}),
					funding_outpoint: None,
					channel_type_features: channel_type.clone()
				},
				funding_transaction: None,
				is_batch_funding: None,

				counterparty_cur_commitment_point: Some(msg.first_per_commitment_point),
				counterparty_prev_commitment_point: None,
				counterparty_node_id,

				counterparty_shutdown_scriptpubkey,

				commitment_secrets: CounterpartyCommitmentSecrets::new(),

				channel_update_status: ChannelUpdateStatus::Enabled,
				closing_signed_in_flight: false,

				announcement_sigs: None,

				#[cfg(any(test, fuzzing))]
				next_local_commitment_tx_fee_info_cached: Mutex::new(None),
				#[cfg(any(test, fuzzing))]
				next_remote_commitment_tx_fee_info_cached: Mutex::new(None),

				workaround_lnd_bug_4006: None,
				sent_message_awaiting_response: None,

				latest_inbound_scid_alias: None,
				outbound_scid_alias: 0,

				channel_pending_event_emitted: false,
				channel_ready_event_emitted: false,

				#[cfg(any(test, fuzzing))]
				historical_inbound_htlc_fulfills: HashSet::new(),

				channel_type,
				channel_keys_id,

				blocked_monitor_updates: Vec::new(),
			},
			unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
		};

		Ok(chan)
	}
7162 /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
7163 /// should be sent back to the counterparty node.
7165 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7166 pub fn accept_inbound_channel(&mut self) -> msgs::AcceptChannel {
7167 if self.context.is_outbound() {
7168 panic!("Tried to send accept_channel for an outbound channel?");
7169 }
7170 if !matches!(
7171 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
7172 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
7173 ) {
7174 panic!("Tried to send accept_channel after channel had moved forward");
7176 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7177 panic!("Tried to send an accept_channel for a channel that has already advanced");
7180 self.generate_accept_channel_message()
7183 /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
7184 /// inbound channel. If the intention is to accept an inbound channel, use
7185 /// [`InboundV1Channel::accept_inbound_channel`] instead.
7187 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7188 fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
7189 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
7190 let keys = self.context.get_holder_pubkeys();
7192 msgs::AcceptChannel {
7193 temporary_channel_id: self.context.channel_id,
7194 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
7195 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
7196 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
7197 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
7198 minimum_depth: self.context.minimum_depth.unwrap(),
7199 to_self_delay: self.context.get_holder_selected_contest_delay(),
7200 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
7201 funding_pubkey: keys.funding_pubkey,
7202 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
7203 payment_point: keys.payment_point,
7204 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
7205 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
7206 first_per_commitment_point,
7207 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
7208 Some(script) => script.clone().into_inner(),
7209 None => Builder::new().into_script(),
7211 channel_type: Some(self.context.channel_type.clone()),
7213 next_local_nonce: None,
7217 /// Enables the possibility for tests to extract a [`msgs::AcceptChannel`] message for an
7218 /// inbound channel without accepting it.
7220 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7222 pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel {
7223 self.generate_accept_channel_message()
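// Hypothetical usage sketch (mirroring how the handshake tests later in this file
// drive the API; variable names are illustrative only):
//
//     let mut chan = InboundV1Channel::<&TestKeysInterface>::new(/* fee estimator, keys, open_channel msg, ... */).unwrap();
//     let accept_msg = chan.accept_inbound_channel();
//     // relay `accept_msg` back to the counterparty via the peer handler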
7226 fn check_funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<CommitmentTransaction, ChannelError> where L::Target: Logger {
7227 let funding_script = self.context.get_funding_redeemscript();
7229 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
7230 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
7231 let trusted_tx = initial_commitment_tx.trust();
7232 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
7233 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
7234 // They sign the holder commitment transaction...
7235 log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
7236 log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
7237 encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
7238 encode::serialize_hex(&funding_script), &self.context.channel_id());
7239 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
7241 Ok(initial_commitment_tx)
7244 pub fn funding_created<L: Deref>(
7245 mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
7246 ) -> Result<(Channel<SP>, Option<msgs::FundingSigned>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (Self, ChannelError)>
7250 if self.context.is_outbound() {
7251 return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned())));
7252 }
7253 if !matches!(
7254 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
7255 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
7256 ) {
7257 // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
7258 // remember the channel, so it's safe to just send an error_message here and drop the
7259 // channel.
7260 return Err((self, ChannelError::Close("Received funding_created after we got the channel!".to_owned())));
7262 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
7263 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
7264 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7265 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
7268 let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
7269 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
7270 // This is an externally observable change before we finish all our checks. In particular
7271 // check_funding_created_signature may fail.
7272 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
7274 let initial_commitment_tx = match self.check_funding_created_signature(&msg.signature, logger) {
7276 Err(ChannelError::Close(e)) => {
7277 self.context.channel_transaction_parameters.funding_outpoint = None;
7278 return Err((self, ChannelError::Close(e)));
7281 // The only error we know how to handle is ChannelError::Close, so we fall over here
7282 // to make sure we don't continue with an inconsistent state.
7283 panic!("unexpected error type from check_funding_created_signature {:?}", e);
7287 let holder_commitment_tx = HolderCommitmentTransaction::new(
7288 initial_commitment_tx,
7289 msg.signature,
7290 Vec::new(),
7291 &self.context.get_holder_pubkeys().funding_pubkey,
7292 self.context.counterparty_funding_pubkey()
7295 if let Err(_) = self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new()) {
7296 return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
7299 // Now that we're past error-generating stuff, update our local state:
7301 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
7302 self.context.channel_id = funding_txo.to_channel_id();
7303 self.context.cur_counterparty_commitment_transaction_number -= 1;
7304 self.context.cur_holder_commitment_transaction_number -= 1;
7306 let (counterparty_initial_commitment_tx, funding_signed) = self.context.get_funding_signed_msg(logger);
7308 let funding_redeemscript = self.context.get_funding_redeemscript();
7309 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
7310 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
7311 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
7312 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
7313 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
7314 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
7315 shutdown_script, self.context.get_holder_selected_contest_delay(),
7316 &self.context.destination_script, (funding_txo, funding_txo_script.clone()),
7317 &self.context.channel_transaction_parameters,
7318 funding_redeemscript.clone(), self.context.channel_value_satoshis,
7319 obscure_factor,
7320 holder_commitment_tx, best_block, self.context.counterparty_node_id);
7321 channel_monitor.provide_initial_counterparty_commitment_tx(
7322 counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
7323 self.context.cur_counterparty_commitment_transaction_number + 1,
7324 self.context.counterparty_cur_commitment_point.unwrap(), self.context.feerate_per_kw,
7325 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
7326 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
7328 log_info!(logger, "{} funding_signed for peer for channel {}",
7329 if funding_signed.is_some() { "Generated" } else { "Waiting for signature on" }, &self.context.channel_id());
7331 // Promote the channel to a full-fledged one now that we have updated the state and have a
7332 // `ChannelMonitor`.
7333 let mut channel = Channel {
7334 context: self.context,
7336 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
7337 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
7339 Ok((channel, funding_signed, channel_monitor))
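// Hypothetical driver sequence for the steps above (mirroring the dust-limit test
// later in this file; error handling elided):
//
//     let funding_created = node_a_chan.get_funding_created(tx, funding_outpoint, false, &&logger).unwrap();
//     let (node_b_chan, funding_signed, monitor) =
//         node_b_chan.funding_created(&funding_created.unwrap(), best_block, &&keys_provider, &&logger).unwrap();
//     // Persist `monitor` before handing `funding_signed` to the peer.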
7343 const SERIALIZATION_VERSION: u8 = 3;
7344 const MIN_SERIALIZATION_VERSION: u8 = 3;
7346 impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
7347 (0, FailRelay),
7348 (1, FailMalformed),
7349 (2, Fulfill),
7350 );
7352 impl Writeable for ChannelUpdateStatus {
7353 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7354 // We only care about writing out the current state as it was announced, i.e. only either
7355 // Enabled or Disabled. In the case of DisabledStaged, we most recently announced the
7356 // channel as enabled, so we write 0. For EnabledStaged, we similarly write a 1.
7358 ChannelUpdateStatus::Enabled => 0u8.write(writer)?,
7359 ChannelUpdateStatus::DisabledStaged(_) => 0u8.write(writer)?,
7360 ChannelUpdateStatus::EnabledStaged(_) => 1u8.write(writer)?,
7361 ChannelUpdateStatus::Disabled => 1u8.write(writer)?,
7367 impl Readable for ChannelUpdateStatus {
7368 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
7369 Ok(match <u8 as Readable>::read(reader)? {
7370 0 => ChannelUpdateStatus::Enabled,
7371 1 => ChannelUpdateStatus::Disabled,
7372 _ => return Err(DecodeError::InvalidValue),
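// A minimal round-trip sketch of the lossy mapping above (hypothetical test; it
// assumes, as elsewhere in this file, that the staged variants carry a `u8` tick
// counter and that `Writeable::encode` is available):
#[cfg(test)]
#[test]
fn channel_update_status_collapses_staged_states() {
    // DisabledStaged was last announced as enabled, so it round-trips to Enabled.
    let encoded = ChannelUpdateStatus::DisabledStaged(0).encode();
    let decoded = ChannelUpdateStatus::read(&mut &encoded[..]).unwrap();
    assert!(matches!(decoded, ChannelUpdateStatus::Enabled));
}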
7377 impl Writeable for AnnouncementSigsState {
7378 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7379 // We only care about writing out the current state as if we had just disconnected, at
7380 // which point we always set anything but AnnouncementSigsReceived to NotSent.
7382 AnnouncementSigsState::NotSent => 0u8.write(writer),
7383 AnnouncementSigsState::MessageSent => 0u8.write(writer),
7384 AnnouncementSigsState::Committed => 0u8.write(writer),
7385 AnnouncementSigsState::PeerReceived => 1u8.write(writer),
7390 impl Readable for AnnouncementSigsState {
7391 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
7392 Ok(match <u8 as Readable>::read(reader)? {
7393 0 => AnnouncementSigsState::NotSent,
7394 1 => AnnouncementSigsState::PeerReceived,
7395 _ => return Err(DecodeError::InvalidValue),
7400 impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
7401 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7402 // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been called.
7405 write_ver_prefix!(writer, MIN_SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
7407 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7408 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write
7409 // the low bytes now and the optional high bytes later.
7410 let user_id_low = self.context.user_id as u64;
7411 user_id_low.write(writer)?;
7413 // Version 1 deserializers expected to read parts of the config object here. Version 2
7414 // deserializers (0.0.99) now read config through TLVs, and as we now require them for
7415 // `minimum_depth` we simply write dummy values here.
7416 writer.write_all(&[0; 8])?;
7418 self.context.channel_id.write(writer)?;
7420 let mut channel_state = self.context.channel_state;
7421 if matches!(channel_state, ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)) {
7422 channel_state.set_peer_disconnected();
7424 channel_state.to_u32().write(writer)?;
7426 self.context.channel_value_satoshis.write(writer)?;
7428 self.context.latest_monitor_update_id.write(writer)?;
7430 // Write out the old serialization for shutdown_pubkey for backwards compatibility, if
7431 // deserialized from that format.
7432 match self.context.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) {
7433 Some(shutdown_pubkey) => shutdown_pubkey.write(writer)?,
7434 None => [0u8; PUBLIC_KEY_SIZE].write(writer)?,
7436 self.context.destination_script.write(writer)?;
7438 self.context.cur_holder_commitment_transaction_number.write(writer)?;
7439 self.context.cur_counterparty_commitment_transaction_number.write(writer)?;
7440 self.context.value_to_self_msat.write(writer)?;
7442 let mut dropped_inbound_htlcs = 0;
7443 for htlc in self.context.pending_inbound_htlcs.iter() {
7444 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
7445 dropped_inbound_htlcs += 1;
7448 (self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?;
7449 for htlc in self.context.pending_inbound_htlcs.iter() {
7450 if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state {
7453 htlc.htlc_id.write(writer)?;
7454 htlc.amount_msat.write(writer)?;
7455 htlc.cltv_expiry.write(writer)?;
7456 htlc.payment_hash.write(writer)?;
7457 match &htlc.state {
7458 &InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
7459 &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_state) => {
7460 1u8.write(writer)?;
7461 htlc_state.write(writer)?;
7462 },
7463 &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_state) => {
7464 2u8.write(writer)?;
7465 htlc_state.write(writer)?;
7466 },
7467 &InboundHTLCState::Committed => {
7468 3u8.write(writer)?;
7469 },
7470 &InboundHTLCState::LocalRemoved(ref removal_reason) => {
7471 4u8.write(writer)?;
7472 removal_reason.write(writer)?;
7477 let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];
7478 let mut pending_outbound_skimmed_fees: Vec<Option<u64>> = Vec::new();
7479 let mut pending_outbound_blinding_points: Vec<Option<PublicKey>> = Vec::new();
7481 (self.context.pending_outbound_htlcs.len() as u64).write(writer)?;
7482 for htlc in self.context.pending_outbound_htlcs.iter() {
7483 htlc.htlc_id.write(writer)?;
7484 htlc.amount_msat.write(writer)?;
7485 htlc.cltv_expiry.write(writer)?;
7486 htlc.payment_hash.write(writer)?;
7487 htlc.source.write(writer)?;
7488 match &htlc.state {
7489 &OutboundHTLCState::LocalAnnounced(ref onion_packet) => {
7490 0u8.write(writer)?;
7491 onion_packet.write(writer)?;
7492 },
7493 &OutboundHTLCState::Committed => {
7494 1u8.write(writer)?;
7495 },
7496 &OutboundHTLCState::RemoteRemoved(_) => {
7497 // Treat this as a Committed because we haven't received the CS - they'll
7498 // resend the claim/fail on reconnect, as well as (hopefully) the missing CS.
7499 2u8.write(writer)?;
7500 },
7501 &OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref outcome) => {
7502 3u8.write(writer)?;
7503 if let OutboundHTLCOutcome::Success(preimage) = outcome {
7504 preimages.push(preimage);
7505 }
7506 let reason: Option<&HTLCFailReason> = outcome.into();
7507 reason.write(writer)?;
7508 },
7509 &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) => {
7510 4u8.write(writer)?;
7511 if let OutboundHTLCOutcome::Success(preimage) = outcome {
7512 preimages.push(preimage);
7513 }
7514 let reason: Option<&HTLCFailReason> = outcome.into();
7515 reason.write(writer)?;
7518 pending_outbound_skimmed_fees.push(htlc.skimmed_fee_msat);
7519 pending_outbound_blinding_points.push(htlc.blinding_point);
7522 let mut holding_cell_skimmed_fees: Vec<Option<u64>> = Vec::new();
7523 let mut holding_cell_blinding_points: Vec<Option<PublicKey>> = Vec::new();
7524 // Vec of (htlc_id, failure_code, sha256_of_onion)
7525 let mut malformed_htlcs: Vec<(u64, u16, [u8; 32])> = Vec::new();
7526 (self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
7527 for update in self.context.holding_cell_htlc_updates.iter() {
7529 &HTLCUpdateAwaitingACK::AddHTLC {
7530 ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
7531 blinding_point, skimmed_fee_msat,
7532 } => {
7533 0u8.write(writer)?;
7534 amount_msat.write(writer)?;
7535 cltv_expiry.write(writer)?;
7536 payment_hash.write(writer)?;
7537 source.write(writer)?;
7538 onion_routing_packet.write(writer)?;
7540 holding_cell_skimmed_fees.push(skimmed_fee_msat);
7541 holding_cell_blinding_points.push(blinding_point);
7543 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
7544 1u8.write(writer)?;
7545 payment_preimage.write(writer)?;
7546 htlc_id.write(writer)?;
7548 &HTLCUpdateAwaitingACK::FailHTLC { ref htlc_id, ref err_packet } => {
7549 2u8.write(writer)?;
7550 htlc_id.write(writer)?;
7551 err_packet.write(writer)?;
7553 &HTLCUpdateAwaitingACK::FailMalformedHTLC {
7554 htlc_id, failure_code, sha256_of_onion
7556 // We don't want to break downgrading by adding a new variant, so write a dummy
7557 // `::FailHTLC` variant and write the real malformed error as an optional TLV.
7558 malformed_htlcs.push((htlc_id, failure_code, sha256_of_onion));
7560 let dummy_err_packet = msgs::OnionErrorPacket { data: Vec::new() };
7561 2u8.write(writer)?;
7562 htlc_id.write(writer)?;
7563 dummy_err_packet.write(writer)?;
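// On read, each dummy `FailHTLC` written above is matched back to its
// `malformed_htlcs` TLV entry by `htlc_id` and swapped for a real
// `FailMalformedHTLC`; see the lookup in the deserialization path below.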
7568 match self.context.resend_order {
7569 RAACommitmentOrder::CommitmentFirst => 0u8.write(writer)?,
7570 RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?,
7573 self.context.monitor_pending_channel_ready.write(writer)?;
7574 self.context.monitor_pending_revoke_and_ack.write(writer)?;
7575 self.context.monitor_pending_commitment_signed.write(writer)?;
7577 (self.context.monitor_pending_forwards.len() as u64).write(writer)?;
7578 for &(ref pending_forward, ref htlc_id) in self.context.monitor_pending_forwards.iter() {
7579 pending_forward.write(writer)?;
7580 htlc_id.write(writer)?;
7583 (self.context.monitor_pending_failures.len() as u64).write(writer)?;
7584 for &(ref htlc_source, ref payment_hash, ref fail_reason) in self.context.monitor_pending_failures.iter() {
7585 htlc_source.write(writer)?;
7586 payment_hash.write(writer)?;
7587 fail_reason.write(writer)?;
7590 if self.context.is_outbound() {
7591 self.context.pending_update_fee.map(|(a, _)| a).write(writer)?;
7592 } else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.context.pending_update_fee {
7593 Some(feerate).write(writer)?;
7595 // As for inbound HTLCs, if the update was only announced and never committed in a
7596 // commitment_signed, drop it.
7597 None::<u32>.write(writer)?;
7599 self.context.holding_cell_update_fee.write(writer)?;
7601 self.context.next_holder_htlc_id.write(writer)?;
7602 (self.context.next_counterparty_htlc_id - dropped_inbound_htlcs).write(writer)?;
7603 self.context.update_time_counter.write(writer)?;
7604 self.context.feerate_per_kw.write(writer)?;
7606 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7607 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7608 // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7609 // consider the stale state on reload.
7612 self.context.funding_tx_confirmed_in.write(writer)?;
7613 self.context.funding_tx_confirmation_height.write(writer)?;
7614 self.context.short_channel_id.write(writer)?;
7616 self.context.counterparty_dust_limit_satoshis.write(writer)?;
7617 self.context.holder_dust_limit_satoshis.write(writer)?;
7618 self.context.counterparty_max_htlc_value_in_flight_msat.write(writer)?;
7620 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7621 self.context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0).write(writer)?;
7623 self.context.counterparty_htlc_minimum_msat.write(writer)?;
7624 self.context.holder_htlc_minimum_msat.write(writer)?;
7625 self.context.counterparty_max_accepted_htlcs.write(writer)?;
7627 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7628 self.context.minimum_depth.unwrap_or(0).write(writer)?;
7630 match &self.context.counterparty_forwarding_info {
7631 Some(info) => {
7632 1u8.write(writer)?;
7633 info.fee_base_msat.write(writer)?;
7634 info.fee_proportional_millionths.write(writer)?;
7635 info.cltv_expiry_delta.write(writer)?;
7637 None => 0u8.write(writer)?
7640 self.context.channel_transaction_parameters.write(writer)?;
7641 self.context.funding_transaction.write(writer)?;
7643 self.context.counterparty_cur_commitment_point.write(writer)?;
7644 self.context.counterparty_prev_commitment_point.write(writer)?;
7645 self.context.counterparty_node_id.write(writer)?;
7647 self.context.counterparty_shutdown_scriptpubkey.write(writer)?;
7649 self.context.commitment_secrets.write(writer)?;
7651 self.context.channel_update_status.write(writer)?;
7653 #[cfg(any(test, fuzzing))]
7654 (self.context.historical_inbound_htlc_fulfills.len() as u64).write(writer)?;
7655 #[cfg(any(test, fuzzing))]
7656 for htlc in self.context.historical_inbound_htlc_fulfills.iter() {
7657 htlc.write(writer)?;
7660 // If the channel type is something other than only-static-remote-key, then we need to have
7661 // older clients fail to deserialize this channel at all. If the type is
7662 // only-static-remote-key, we simply consider it "default" and don't write the channel type at all.
7664 let chan_type = if self.context.channel_type != ChannelTypeFeatures::only_static_remote_key() {
7665 Some(&self.context.channel_type) } else { None };
7667 // The same logic applies for `holder_selected_channel_reserve_satoshis` values other than
7668 // the default, and when `holder_max_htlc_value_in_flight_msat` is configured to be set to
7669 // a different percentage of the channel value than 10%, which older versions of LDK used
7670 // to set it to before the percentage was made configurable.
7671 let serialized_holder_selected_reserve =
7672 if self.context.holder_selected_channel_reserve_satoshis != get_legacy_default_holder_selected_channel_reserve_satoshis(self.context.channel_value_satoshis)
7673 { Some(self.context.holder_selected_channel_reserve_satoshis) } else { None };
7675 let mut old_max_in_flight_percent_config = UserConfig::default().channel_handshake_config;
7676 old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = MAX_IN_FLIGHT_PERCENT_LEGACY;
7677 let serialized_holder_htlc_max_in_flight =
7678 if self.context.holder_max_htlc_value_in_flight_msat != get_holder_max_htlc_value_in_flight_msat(self.context.channel_value_satoshis, &old_max_in_flight_percent_config)
7679 { Some(self.context.holder_max_htlc_value_in_flight_msat) } else { None };
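// In both cases above, a TLV is only emitted when the value differs from what a
// legacy reader would reconstruct on its own, keeping default-configured channels
// byte-for-byte compatible with serializations from older LDK versions.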
7681 let channel_pending_event_emitted = Some(self.context.channel_pending_event_emitted);
7682 let channel_ready_event_emitted = Some(self.context.channel_ready_event_emitted);
7684 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7685 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. Therefore,
7686 // we write the high bytes as an option here.
7687 let user_id_high_opt = Some((self.context.user_id >> 64) as u64);
7689 let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.context.holder_max_accepted_htlcs) };
7691 write_tlv_fields!(writer, {
7692 (0, self.context.announcement_sigs, option),
7693 // minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
7694 // default value instead of being Option<>al. Thus, to maintain compatibility we write
7695 // them twice, once with their original default values above, and once as an option
7696 // here. On the read side, old versions will simply ignore the odd-type entries here,
7697 // and new versions map the default values to None and allow the TLV entries here to override them.
7699 (1, self.context.minimum_depth, option),
7700 (2, chan_type, option),
7701 (3, self.context.counterparty_selected_channel_reserve_satoshis, option),
7702 (4, serialized_holder_selected_reserve, option),
7703 (5, self.context.config, required),
7704 (6, serialized_holder_htlc_max_in_flight, option),
7705 (7, self.context.shutdown_scriptpubkey, option),
7706 (8, self.context.blocked_monitor_updates, optional_vec),
7707 (9, self.context.target_closing_feerate_sats_per_kw, option),
7708 (11, self.context.monitor_pending_finalized_fulfills, required_vec),
7709 (13, self.context.channel_creation_height, required),
7710 (15, preimages, required_vec),
7711 (17, self.context.announcement_sigs_state, required),
7712 (19, self.context.latest_inbound_scid_alias, option),
7713 (21, self.context.outbound_scid_alias, required),
7714 (23, channel_ready_event_emitted, option),
7715 (25, user_id_high_opt, option),
7716 (27, self.context.channel_keys_id, required),
7717 (28, holder_max_accepted_htlcs, option),
7718 (29, self.context.temporary_channel_id, option),
7719 (31, channel_pending_event_emitted, option),
7720 (35, pending_outbound_skimmed_fees, optional_vec),
7721 (37, holding_cell_skimmed_fees, optional_vec),
7722 (38, self.context.is_batch_funding, option),
7723 (39, pending_outbound_blinding_points, optional_vec),
7724 (41, holding_cell_blinding_points, optional_vec),
7725 (43, malformed_htlcs, optional_vec), // Added in 0.0.119
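// Note the TLV type parity convention above: odd types are optional (readers skip
// unknown odd entries), while even types are required-to-understand, so writing a
// new even entry deliberately makes the channel unreadable by versions that
// predate it.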
7732 const MAX_ALLOC_SIZE: usize = 64*1024;
7733 impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel<SP>
7735 ES::Target: EntropySource,
7736 SP::Target: SignerProvider
7738 fn read<R : io::Read>(reader: &mut R, args: (&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)) -> Result<Self, DecodeError> {
7739 let (entropy_source, signer_provider, serialized_height, our_supported_features) = args;
7740 let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
7742 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7743 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We read
7744 // the low bytes now and the high bytes later.
7745 let user_id_low: u64 = Readable::read(reader)?;
7747 let mut config = Some(LegacyChannelConfig::default());
7749 // Read the old serialization of the ChannelConfig from version 0.0.98.
7750 config.as_mut().unwrap().options.forwarding_fee_proportional_millionths = Readable::read(reader)?;
7751 config.as_mut().unwrap().options.cltv_expiry_delta = Readable::read(reader)?;
7752 config.as_mut().unwrap().announced_channel = Readable::read(reader)?;
7753 config.as_mut().unwrap().commit_upfront_shutdown_pubkey = Readable::read(reader)?;
7755 // Read the 8 bytes of backwards-compatibility ChannelConfig data.
7756 let mut _val: u64 = Readable::read(reader)?;
7759 let channel_id = Readable::read(reader)?;
7760 let channel_state = ChannelState::from_u32(Readable::read(reader)?).map_err(|_| DecodeError::InvalidValue)?;
7761 let channel_value_satoshis = Readable::read(reader)?;
7763 let latest_monitor_update_id = Readable::read(reader)?;
7765 let mut keys_data = None;
7767 // Read the serialized signer bytes. We'll choose to deserialize them or not based on whether
7768 // the `channel_keys_id` TLV is present below.
7769 let keys_len: u32 = Readable::read(reader)?;
7770 keys_data = Some(Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE)));
7771 while keys_data.as_ref().unwrap().len() != keys_len as usize {
7772 // Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
7773 let mut data = [0; 1024];
7774 let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.as_ref().unwrap().len())];
7775 reader.read_exact(read_slice)?;
7776 keys_data.as_mut().unwrap().extend_from_slice(read_slice);
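// The loop above caps both the up-front allocation and each read. The same
// defensive pattern as a generic sketch (hypothetical helper, not part of LDK's
// API; assumes the length prefix may be attacker-controlled):
#[allow(dead_code)]
fn read_bounded_bytes<R: io::Read>(reader: &mut R, claimed_len: usize) -> Result<Vec<u8>, DecodeError> {
    // Never allocate more than MAX_ALLOC_SIZE up front, whatever the prefix claims.
    let mut data = Vec::with_capacity(cmp::min(claimed_len, MAX_ALLOC_SIZE));
    let mut buf = [0u8; 1024];
    while data.len() != claimed_len {
        // Read at most 1KB per iteration; a bogus length fails with ShortRead
        // rather than triggering a multi-GB allocation.
        let read_slice = &mut buf[0..cmp::min(1024, claimed_len - data.len())];
        reader.read_exact(read_slice).map_err(|_| DecodeError::ShortRead)?;
        data.extend_from_slice(read_slice);
    }
    Ok(data)
}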
7780 // Read the old serialization for shutdown_pubkey, preferring the TLV field later if set.
7781 let mut shutdown_scriptpubkey = match <PublicKey as Readable>::read(reader) {
7782 Ok(pubkey) => Some(ShutdownScript::new_p2wpkh_from_pubkey(pubkey)),
7785 let destination_script = Readable::read(reader)?;
7787 let cur_holder_commitment_transaction_number = Readable::read(reader)?;
7788 let cur_counterparty_commitment_transaction_number = Readable::read(reader)?;
7789 let value_to_self_msat = Readable::read(reader)?;
7791 let pending_inbound_htlc_count: u64 = Readable::read(reader)?;
7793 let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7794 for _ in 0..pending_inbound_htlc_count {
7795 pending_inbound_htlcs.push(InboundHTLCOutput {
7796 htlc_id: Readable::read(reader)?,
7797 amount_msat: Readable::read(reader)?,
7798 cltv_expiry: Readable::read(reader)?,
7799 payment_hash: Readable::read(reader)?,
7800 state: match <u8 as Readable>::read(reader)? {
7801 1 => InboundHTLCState::AwaitingRemoteRevokeToAnnounce(Readable::read(reader)?),
7802 2 => InboundHTLCState::AwaitingAnnouncedRemoteRevoke(Readable::read(reader)?),
7803 3 => InboundHTLCState::Committed,
7804 4 => InboundHTLCState::LocalRemoved(Readable::read(reader)?),
7805 _ => return Err(DecodeError::InvalidValue),
7810 let pending_outbound_htlc_count: u64 = Readable::read(reader)?;
7811 let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7812 for _ in 0..pending_outbound_htlc_count {
7813 pending_outbound_htlcs.push(OutboundHTLCOutput {
7814 htlc_id: Readable::read(reader)?,
7815 amount_msat: Readable::read(reader)?,
7816 cltv_expiry: Readable::read(reader)?,
7817 payment_hash: Readable::read(reader)?,
7818 source: Readable::read(reader)?,
7819 state: match <u8 as Readable>::read(reader)? {
7820 0 => OutboundHTLCState::LocalAnnounced(Box::new(Readable::read(reader)?)),
7821 1 => OutboundHTLCState::Committed,
7822 2 => {
7823 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7824 OutboundHTLCState::RemoteRemoved(option.into())
7825 },
7826 3 => {
7827 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7828 OutboundHTLCState::AwaitingRemoteRevokeToRemove(option.into())
7829 },
7830 4 => {
7831 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7832 OutboundHTLCState::AwaitingRemovedRemoteRevoke(option.into())
7833 },
7834 _ => return Err(DecodeError::InvalidValue),
7836 skimmed_fee_msat: None,
7837 blinding_point: None,
7841 let holding_cell_htlc_update_count: u64 = Readable::read(reader)?;
7842 let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, DEFAULT_MAX_HTLCS as usize*2));
7843 for _ in 0..holding_cell_htlc_update_count {
7844 holding_cell_htlc_updates.push(match <u8 as Readable>::read(reader)? {
7845 0 => HTLCUpdateAwaitingACK::AddHTLC {
7846 amount_msat: Readable::read(reader)?,
7847 cltv_expiry: Readable::read(reader)?,
7848 payment_hash: Readable::read(reader)?,
7849 source: Readable::read(reader)?,
7850 onion_routing_packet: Readable::read(reader)?,
7851 skimmed_fee_msat: None,
7852 blinding_point: None,
7854 1 => HTLCUpdateAwaitingACK::ClaimHTLC {
7855 payment_preimage: Readable::read(reader)?,
7856 htlc_id: Readable::read(reader)?,
7858 2 => HTLCUpdateAwaitingACK::FailHTLC {
7859 htlc_id: Readable::read(reader)?,
7860 err_packet: Readable::read(reader)?,
7862 _ => return Err(DecodeError::InvalidValue),
7866 let resend_order = match <u8 as Readable>::read(reader)? {
7867 0 => RAACommitmentOrder::CommitmentFirst,
7868 1 => RAACommitmentOrder::RevokeAndACKFirst,
7869 _ => return Err(DecodeError::InvalidValue),
7872 let monitor_pending_channel_ready = Readable::read(reader)?;
7873 let monitor_pending_revoke_and_ack = Readable::read(reader)?;
7874 let monitor_pending_commitment_signed = Readable::read(reader)?;
7876 let monitor_pending_forwards_count: u64 = Readable::read(reader)?;
7877 let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, DEFAULT_MAX_HTLCS as usize));
7878 for _ in 0..monitor_pending_forwards_count {
7879 monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?));
7882 let monitor_pending_failures_count: u64 = Readable::read(reader)?;
7883 let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, DEFAULT_MAX_HTLCS as usize));
7884 for _ in 0..monitor_pending_failures_count {
7885 monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?));
7888 let pending_update_fee_value: Option<u32> = Readable::read(reader)?;
7890 let holding_cell_update_fee = Readable::read(reader)?;
7892 let next_holder_htlc_id = Readable::read(reader)?;
7893 let next_counterparty_htlc_id = Readable::read(reader)?;
7894 let update_time_counter = Readable::read(reader)?;
7895 let feerate_per_kw = Readable::read(reader)?;
7897 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7898 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7899 // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7900 // consider the stale state on reload.
7901 match <u8 as Readable>::read(reader)? {
7902 0 => {},
7903 1 => {
7904 let _: u32 = Readable::read(reader)?;
7905 let _: u64 = Readable::read(reader)?;
7906 let _: Signature = Readable::read(reader)?;
7908 _ => return Err(DecodeError::InvalidValue),
7911 let funding_tx_confirmed_in = Readable::read(reader)?;
7912 let funding_tx_confirmation_height = Readable::read(reader)?;
7913 let short_channel_id = Readable::read(reader)?;
7915 let counterparty_dust_limit_satoshis = Readable::read(reader)?;
7916 let holder_dust_limit_satoshis = Readable::read(reader)?;
7917 let counterparty_max_htlc_value_in_flight_msat = Readable::read(reader)?;
7918 let mut counterparty_selected_channel_reserve_satoshis = None;
7920 // Read the old serialization from version 0.0.98.
7921 counterparty_selected_channel_reserve_satoshis = Some(Readable::read(reader)?);
7923 // Read the 8 bytes of backwards-compatibility data.
7924 let _dummy: u64 = Readable::read(reader)?;
7926 let counterparty_htlc_minimum_msat = Readable::read(reader)?;
7927 let holder_htlc_minimum_msat = Readable::read(reader)?;
7928 let counterparty_max_accepted_htlcs = Readable::read(reader)?;
7930 let mut minimum_depth = None;
7932 // Read the old serialization from version 0.0.98.
7933 minimum_depth = Some(Readable::read(reader)?);
7935 // Read the 4 bytes of backwards-compatibility data.
7936 let _dummy: u32 = Readable::read(reader)?;
7939 let counterparty_forwarding_info = match <u8 as Readable>::read(reader)? {
7940 0 => None,
7941 1 => Some(CounterpartyForwardingInfo {
7942 fee_base_msat: Readable::read(reader)?,
7943 fee_proportional_millionths: Readable::read(reader)?,
7944 cltv_expiry_delta: Readable::read(reader)?,
7946 _ => return Err(DecodeError::InvalidValue),
7949 let mut channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
7950 let funding_transaction: Option<Transaction> = Readable::read(reader)?;
7952 let counterparty_cur_commitment_point = Readable::read(reader)?;
7954 let counterparty_prev_commitment_point = Readable::read(reader)?;
7955 let counterparty_node_id = Readable::read(reader)?;
7957 let counterparty_shutdown_scriptpubkey = Readable::read(reader)?;
7958 let commitment_secrets = Readable::read(reader)?;
7960 let channel_update_status = Readable::read(reader)?;
7962 #[cfg(any(test, fuzzing))]
7963 let mut historical_inbound_htlc_fulfills = HashSet::new();
7964 #[cfg(any(test, fuzzing))]
7966 let htlc_fulfills_len: u64 = Readable::read(reader)?;
7967 for _ in 0..htlc_fulfills_len {
7968 assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?));
7972 let pending_update_fee = if let Some(feerate) = pending_update_fee_value {
7973 Some((feerate, if channel_parameters.is_outbound_from_holder {
7974 FeeUpdateState::Outbound
7976 FeeUpdateState::AwaitingRemoteRevokeToAnnounce
7982 let mut announcement_sigs = None;
7983 let mut target_closing_feerate_sats_per_kw = None;
7984 let mut monitor_pending_finalized_fulfills = Some(Vec::new());
7985 let mut holder_selected_channel_reserve_satoshis = Some(get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis));
7986 let mut holder_max_htlc_value_in_flight_msat = Some(get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().channel_handshake_config));
7987 // Prior to supporting channel type negotiation, all of our channels were static_remotekey
7988 // only, so we default to that if none was written.
7989 let mut channel_type = Some(ChannelTypeFeatures::only_static_remote_key());
7990 let mut channel_creation_height = Some(serialized_height);
7991 let mut preimages_opt: Option<Vec<Option<PaymentPreimage>>> = None;
7993 // If we read an old Channel, for simplicity we just treat it as "we never sent an
7994 // AnnouncementSignatures" which implies we'll re-send it on reconnect, but that's fine.
7995 let mut announcement_sigs_state = Some(AnnouncementSigsState::NotSent);
7996 let mut latest_inbound_scid_alias = None;
7997 let mut outbound_scid_alias = None;
7998 let mut channel_pending_event_emitted = None;
7999 let mut channel_ready_event_emitted = None;
8001 let mut user_id_high_opt: Option<u64> = None;
8002 let mut channel_keys_id: Option<[u8; 32]> = None;
8003 let mut temporary_channel_id: Option<ChannelId> = None;
8004 let mut holder_max_accepted_htlcs: Option<u16> = None;
8006 let mut blocked_monitor_updates = Some(Vec::new());
8008 let mut pending_outbound_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
8009 let mut holding_cell_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
8011 let mut is_batch_funding: Option<()> = None;
8013 let mut pending_outbound_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
8014 let mut holding_cell_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
8016 let mut malformed_htlcs: Option<Vec<(u64, u16, [u8; 32])>> = None;
8018 read_tlv_fields!(reader, {
8019 (0, announcement_sigs, option),
8020 (1, minimum_depth, option),
8021 (2, channel_type, option),
8022 (3, counterparty_selected_channel_reserve_satoshis, option),
8023 (4, holder_selected_channel_reserve_satoshis, option),
8024 (5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
8025 (6, holder_max_htlc_value_in_flight_msat, option),
8026 (7, shutdown_scriptpubkey, option),
8027 (8, blocked_monitor_updates, optional_vec),
8028 (9, target_closing_feerate_sats_per_kw, option),
8029 (11, monitor_pending_finalized_fulfills, optional_vec),
8030 (13, channel_creation_height, option),
8031 (15, preimages_opt, optional_vec),
8032 (17, announcement_sigs_state, option),
8033 (19, latest_inbound_scid_alias, option),
8034 (21, outbound_scid_alias, option),
8035 (23, channel_ready_event_emitted, option),
8036 (25, user_id_high_opt, option),
8037 (27, channel_keys_id, option),
8038 (28, holder_max_accepted_htlcs, option),
8039 (29, temporary_channel_id, option),
8040 (31, channel_pending_event_emitted, option),
8041 (35, pending_outbound_skimmed_fees_opt, optional_vec),
8042 (37, holding_cell_skimmed_fees_opt, optional_vec),
8043 (38, is_batch_funding, option),
8044 (39, pending_outbound_blinding_points_opt, optional_vec),
8045 (41, holding_cell_blinding_points_opt, optional_vec),
8046 (43, malformed_htlcs, optional_vec), // Added in 0.0.119
8049 let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
8050 let mut holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
8051 // If we've gotten to the funding stage of the channel, populate the signer with its
8052 // required channel parameters.
8053 if channel_state >= ChannelState::FundingNegotiated {
8054 holder_signer.provide_channel_parameters(&channel_parameters);
8056 (channel_keys_id, holder_signer)
8058 // `keys_data` can be `None` if we had corrupted data.
8059 let keys_data = keys_data.ok_or(DecodeError::InvalidValue)?;
8060 let holder_signer = signer_provider.read_chan_signer(&keys_data)?;
8061 (holder_signer.channel_keys_id(), holder_signer)
8064 if let Some(preimages) = preimages_opt {
8065 let mut iter = preimages.into_iter();
8066 for htlc in pending_outbound_htlcs.iter_mut() {
8068 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(None)) => {
8069 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
8071 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(None)) => {
8072 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
8077 // We expect all preimages to be consumed above
8078 if iter.next().is_some() {
8079 return Err(DecodeError::InvalidValue);
8083 let chan_features = channel_type.as_ref().unwrap();
8084 if !chan_features.is_subset(our_supported_features) {
8085 // If the channel was written by a new version and negotiated with features we don't
8086 // understand yet, refuse to read it.
8087 return Err(DecodeError::UnknownRequiredFeature);
8090 // ChannelTransactionParameters may have had an empty features set upon deserialization.
8091 // To account for that, we're proactively setting/overriding the field here.
8092 channel_parameters.channel_type_features = chan_features.clone();
8094 let mut secp_ctx = Secp256k1::new();
8095 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
8097 // `user_id` used to be a single u64 value. In order to remain backwards
8098 // compatible with versions prior to 0.0.113, the u128 is serialized as two
8099 // separate u64 values.
8100 let user_id = user_id_low as u128 + ((user_id_high_opt.unwrap_or(0) as u128) << 64);
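// For example, user_id = 0x0123456789abcdef_fedcba9876543210u128 was written as
// low = 0xfedcba9876543210u64 and high = 0x0123456789abcdefu64;
// `low as u128 + ((high as u128) << 64)` reconstructs the original exactly.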
8102 let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);
8104 if let Some(skimmed_fees) = pending_outbound_skimmed_fees_opt {
8105 let mut iter = skimmed_fees.into_iter();
8106 for htlc in pending_outbound_htlcs.iter_mut() {
8107 htlc.skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
8109 // We expect all skimmed fees to be consumed above
8110 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8112 if let Some(skimmed_fees) = holding_cell_skimmed_fees_opt {
8113 let mut iter = skimmed_fees.into_iter();
8114 for htlc in holding_cell_htlc_updates.iter_mut() {
8115 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut skimmed_fee_msat, .. } = htlc {
8116 *skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
8119 // We expect all skimmed fees to be consumed above
8120 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8122 if let Some(blinding_pts) = pending_outbound_blinding_points_opt {
8123 let mut iter = blinding_pts.into_iter();
8124 for htlc in pending_outbound_htlcs.iter_mut() {
8125 htlc.blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
8127 // We expect all blinding points to be consumed above
8128 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8130 if let Some(blinding_pts) = holding_cell_blinding_points_opt {
8131 let mut iter = blinding_pts.into_iter();
8132 for htlc in holding_cell_htlc_updates.iter_mut() {
8133 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut blinding_point, .. } = htlc {
8134 *blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
8137 // We expect all blinding points to be consumed above
8138 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8141 if let Some(malformed_htlcs) = malformed_htlcs {
8142 for (malformed_htlc_id, failure_code, sha256_of_onion) in malformed_htlcs {
8143 let htlc_idx = holding_cell_htlc_updates.iter().position(|htlc| {
8144 if let HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet } = htlc {
8145 let matches = *htlc_id == malformed_htlc_id;
8146 if matches { debug_assert!(err_packet.data.is_empty()) }
8147 matches
8148 } else { false }
8149 }).ok_or(DecodeError::InvalidValue)?;
8150 let malformed_htlc = HTLCUpdateAwaitingACK::FailMalformedHTLC {
8151 htlc_id: malformed_htlc_id, failure_code, sha256_of_onion
8153 let _ = core::mem::replace(&mut holding_cell_htlc_updates[htlc_idx], malformed_htlc);
8158 context: ChannelContext {
8161 config: config.unwrap(),
8165 // Note that we don't care about serializing handshake limits as we only ever serialize
8166 // channel data after the handshake has completed.
8167 inbound_handshake_limits_override: None,
8170 temporary_channel_id,
8172 announcement_sigs_state: announcement_sigs_state.unwrap(),
8174 channel_value_satoshis,
8176 latest_monitor_update_id,
8178 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
8179 shutdown_scriptpubkey,
8182 cur_holder_commitment_transaction_number,
8183 cur_counterparty_commitment_transaction_number,
8186 holder_max_accepted_htlcs,
8187 pending_inbound_htlcs,
8188 pending_outbound_htlcs,
8189 holding_cell_htlc_updates,
8193 monitor_pending_channel_ready,
8194 monitor_pending_revoke_and_ack,
8195 monitor_pending_commitment_signed,
8196 monitor_pending_forwards,
8197 monitor_pending_failures,
8198 monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
8200 signer_pending_commitment_update: false,
8201 signer_pending_funding: false,
8204 holding_cell_update_fee,
8205 next_holder_htlc_id,
8206 next_counterparty_htlc_id,
8207 update_time_counter,
8210 #[cfg(debug_assertions)]
8211 holder_max_commitment_tx_output: Mutex::new((0, 0)),
8212 #[cfg(debug_assertions)]
8213 counterparty_max_commitment_tx_output: Mutex::new((0, 0)),
8215 last_sent_closing_fee: None,
8216 pending_counterparty_closing_signed: None,
8217 expecting_peer_commitment_signed: false,
8218 closing_fee_limits: None,
8219 target_closing_feerate_sats_per_kw,
8221 funding_tx_confirmed_in,
8222 funding_tx_confirmation_height,
8224 channel_creation_height: channel_creation_height.unwrap(),
8226 counterparty_dust_limit_satoshis,
8227 holder_dust_limit_satoshis,
8228 counterparty_max_htlc_value_in_flight_msat,
8229 holder_max_htlc_value_in_flight_msat: holder_max_htlc_value_in_flight_msat.unwrap(),
8230 counterparty_selected_channel_reserve_satoshis,
8231 holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis.unwrap(),
8232 counterparty_htlc_minimum_msat,
8233 holder_htlc_minimum_msat,
8234 counterparty_max_accepted_htlcs,
8237 counterparty_forwarding_info,
8239 channel_transaction_parameters: channel_parameters,
8240 funding_transaction,
8243 counterparty_cur_commitment_point,
8244 counterparty_prev_commitment_point,
8245 counterparty_node_id,
8247 counterparty_shutdown_scriptpubkey,
8251 channel_update_status,
8252 closing_signed_in_flight: false,
8256 #[cfg(any(test, fuzzing))]
8257 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
8258 #[cfg(any(test, fuzzing))]
8259 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
8261 workaround_lnd_bug_4006: None,
8262 sent_message_awaiting_response: None,
8264 latest_inbound_scid_alias,
8265 // Later in the ChannelManager deserialization phase we scan for channels and assign an scid alias if one is missing.
8266 outbound_scid_alias: outbound_scid_alias.unwrap_or(0),
8268 channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true),
8269 channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true),
8271 #[cfg(any(test, fuzzing))]
8272 historical_inbound_htlc_fulfills,
8274 channel_type: channel_type.unwrap(),
8277 blocked_monitor_updates: blocked_monitor_updates.unwrap(),
8286 use bitcoin::blockdata::constants::ChainHash;
8287 use bitcoin::blockdata::script::{ScriptBuf, Builder};
8288 use bitcoin::blockdata::transaction::{Transaction, TxOut};
8289 use bitcoin::blockdata::opcodes;
8290 use bitcoin::network::constants::Network;
8291 use crate::ln::onion_utils::INVALID_ONION_BLINDING;
8292 use crate::ln::{PaymentHash, PaymentPreimage};
8293 use crate::ln::channel_keys::{RevocationKey, RevocationBasepoint};
8294 use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
8295 use crate::ln::channel::InitFeatures;
8296 use crate::ln::channel::{AwaitingChannelReadyFlags, Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, HTLCUpdateAwaitingACK, commit_tx_fee_msat};
8297 use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
8298 use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
8299 use crate::ln::msgs;
8300 use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
8301 use crate::ln::script::ShutdownScript;
8302 use crate::ln::chan_utils::{self, htlc_success_tx_weight, htlc_timeout_tx_weight};
8303 use crate::chain::BestBlock;
8304 use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
8305 use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
8306 use crate::chain::transaction::OutPoint;
8307 use crate::routing::router::{Path, RouteHop};
8308 use crate::util::config::UserConfig;
8309 use crate::util::errors::APIError;
8310 use crate::util::ser::{ReadableArgs, Writeable};
8311 use crate::util::test_utils;
8312 use crate::util::test_utils::{OnGetShutdownScriptpubkey, TestKeysInterface};
8313 use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
8314 use bitcoin::secp256k1::ffi::Signature as FFISignature;
8315 use bitcoin::secp256k1::{SecretKey,PublicKey};
8316 use bitcoin::hashes::sha256::Hash as Sha256;
8317 use bitcoin::hashes::Hash;
8318 use bitcoin::hashes::hex::FromHex;
8319 use bitcoin::hash_types::WPubkeyHash;
8320 use bitcoin::blockdata::locktime::absolute::LockTime;
8321 use bitcoin::address::{WitnessProgram, WitnessVersion};
8322 use crate::prelude::*;
8324 struct TestFeeEstimator {
8325 fee_est: u32
8326 }
8327 impl FeeEstimator for TestFeeEstimator {
8328 fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 {
8329 self.fee_est
8330 }
8331 }
8333 #[test]
8334 fn test_max_funding_satoshis_no_wumbo() {
8335 assert_eq!(TOTAL_BITCOIN_SUPPLY_SATOSHIS, 21_000_000 * 100_000_000);
8336 assert!(MAX_FUNDING_SATOSHIS_NO_WUMBO <= TOTAL_BITCOIN_SUPPLY_SATOSHIS,
8337 "MAX_FUNDING_SATOSHIS_NO_WUMBO is greater than all satoshis in existence");
8340 struct Keys {
8341 signer: InMemorySigner,
8342 }
8344 impl EntropySource for Keys {
8345 fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }
8348 impl SignerProvider for Keys {
8349 type EcdsaSigner = InMemorySigner;
8351 type TaprootSigner = InMemorySigner;
8353 fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
8354 self.signer.channel_keys_id()
8357 fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::EcdsaSigner {
8358 self.signer.clone()
8359 }
8361 fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::EcdsaSigner, DecodeError> { panic!(); }
8363 fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()> {
8364 let secp_ctx = Secp256k1::signing_only();
8365 let channel_monitor_claim_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
8366 let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
8367 Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(channel_monitor_claim_key_hash).into_script())
8370 fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
8371 let secp_ctx = Secp256k1::signing_only();
8372 let channel_close_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
8373 Ok(ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key)))
8377 #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
8378 fn public_from_secret_hex(secp_ctx: &Secp256k1<bitcoin::secp256k1::All>, hex: &str) -> PublicKey {
8379 PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&<Vec<u8>>::from_hex(hex).unwrap()[..]).unwrap())
8383 fn upfront_shutdown_script_incompatibility() {
8384 let features = channelmanager::provided_init_features(&UserConfig::default()).clear_shutdown_anysegwit();
8385 let non_v0_segwit_shutdown_script = ShutdownScript::new_witness_program(
8386 &WitnessProgram::new(WitnessVersion::V16, &[0, 40]).unwrap(),
8389 let seed = [42; 32];
8390 let network = Network::Testnet;
8391 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8392 keys_provider.expect(OnGetShutdownScriptpubkey {
8393 returns: non_v0_segwit_shutdown_script.clone(),
8396 let secp_ctx = Secp256k1::new();
8397 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8398 let config = UserConfig::default();
8399 match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42, None) {
8400 Err(APIError::IncompatibleShutdownScript { script }) => {
8401 assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
8403 Err(e) => panic!("Unexpected error: {:?}", e),
8404 Ok(_) => panic!("Expected error"),
8408 // Check that, during channel creation, we use the same feerate in the open channel message
8409 // as we do in the Channel object creation itself.
8411 fn test_open_channel_msg_fee() {
8412 let original_fee = 253;
8413 let mut fee_est = TestFeeEstimator{fee_est: original_fee };
8414 let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
8415 let secp_ctx = Secp256k1::new();
8416 let seed = [42; 32];
8417 let network = Network::Testnet;
8418 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8420 let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8421 let config = UserConfig::default();
8422 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8424 // Now change the fee so we can check that the fee in the open_channel message is the
8425 // same as the old fee.
8426 fee_est.fee_est = 500;
8427 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8428 assert_eq!(open_channel_msg.feerate_per_kw, original_fee);
	}

	#[test]
	fn test_holder_vs_counterparty_dust_limit() {
		// Test that when calculating the local and remote commitment transaction fees, the correct
		// dust limits are used.
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15000 });
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
		let logger = test_utils::TestLogger::new();
		let best_block = BestBlock::from_network(network);

		// Go through the flow of opening a channel between two nodes, making sure
		// they have different dust limits.

		// Create Node A's channel pointing to Node B's pubkey
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

		// Create Node B's channel by receiving Node A's open_channel message
		// Make sure A's dust limit is as we expect.
		let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();

		// Node B --> Node A: accept channel, explicitly setting B's dust limit.
		let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
		accept_channel_msg.dust_limit_satoshis = 546;
		node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
		node_a_chan.context.holder_dust_limit_satoshis = 1560;

		// Node A --> Node B: funding created
		let output_script = node_a_chan.context.get_funding_redeemscript();
		let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
			value: 10000000, script_pubkey: output_script.clone(),
		}]};
		let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
		let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
		let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();

		// Node B --> Node A: funding signed
		let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
		let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };

		// Put some inbound and outbound HTLCs in A's channel.
		let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
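		// (Illustrative arithmetic, assuming the pre-anchors HTLC-success claim weight of 703:
		// at 15000 sat/kW a success tx costs 15000 * 703 / 1000 = 10_545 sat, so A's effective
		// received-HTLC dust threshold is 1560 + 10_545 = 12_105 sat while B's is
		// 546 + 10_545 = 11_091 sat; 11_092 sat falls between the two.)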
		node_a_chan.context.pending_inbound_htlcs.push(InboundHTLCOutput {
			htlc_id: 0,
			amount_msat: htlc_amount_msat,
			payment_hash: PaymentHash(Sha256::hash(&[42; 32]).to_byte_array()),
			cltv_expiry: 300000000,
			state: InboundHTLCState::Committed,
		});

		node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
			htlc_id: 1,
			amount_msat: htlc_amount_msat, // put an amount below A's dust amount but above B's.
			payment_hash: PaymentHash(Sha256::hash(&[43; 32]).to_byte_array()),
			cltv_expiry: 200000000,
			state: OutboundHTLCState::Committed,
			source: HTLCSource::OutboundRoute {
				path: Path { hops: Vec::new(), blinded_tail: None },
				session_priv: SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
				first_hop_htlc_msat: 548,
				payment_id: PaymentId([42; 32]),
			},
			skimmed_fee_msat: None,
			blinding_point: None,
		});

		// Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass
		// the dust limit check.
		let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
		let local_commit_tx_fee = node_a_chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
		let local_commit_fee_0_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.get_channel_type());
		assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs);

		// Finally, make sure that when Node A calculates the remote's commitment transaction fees, all
		// of the HTLCs are seen to be above the dust limit.
		node_a_chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
		let remote_commit_fee_3_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.get_channel_type());
		let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
		let remote_commit_tx_fee = node_a_chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
		assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs);
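		// The two pending HTLCs plus the candidate, all non-dust from B's point of view, give
		// the three-HTLC commitment fee.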
	}

	#[test]
	fn test_timeout_vs_success_htlc_dust_limit() {
		// Make sure that when `next_remote_commit_tx_fee_msat` and `next_local_commit_tx_fee_msat`
		// calculate the real dust limits for HTLCs (i.e. the dust limit given by the counterparty
		// *plus* the fees paid for the HTLC) they don't swap `HTLC_SUCCESS_TX_WEIGHT` for
		// `HTLC_TIMEOUT_TX_WEIGHT`, and vice versa.
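		// The effective on-chain dust threshold for an HTLC is, in satoshis:
		//   dust_limit_satoshis + feerate_per_kw * claim_tx_weight / 1000
		// where claim_tx_weight is the HTLC-timeout weight for offered HTLCs and the
		// HTLC-success weight for received ones.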
		let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 });
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

		let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

		let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.get_channel_type());
		let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.get_channel_type());

		// If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped: then this HTLC would be
		// counted as dust when it shouldn't be.
		let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000;
		let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
		let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
		assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);

		// If swapped: this HTLC would be counted as non-dust when it shouldn't be.
		let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000;
		let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered);
		let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
		assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);

		chan.context.channel_transaction_parameters.is_outbound_from_holder = false;

		// If swapped: this HTLC would be counted as non-dust when it shouldn't be.
		let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000;
		let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
		let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
		assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);

		// If swapped: this HTLC would be counted as dust when it shouldn't be.
		let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000;
		let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered);
		let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
		assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
	}

	#[test]
	fn channel_reestablish_no_updates() {
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15000 });
		let logger = test_utils::TestLogger::new();
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let best_block = BestBlock::from_network(network);
		let chain_hash = ChainHash::using_genesis_block(network);
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

		// Go through the flow of opening a channel between two nodes.

		// Create Node A's channel pointing to Node B's pubkey
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

		// Create Node B's channel by receiving Node A's open_channel message
		let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();

		// Node B --> Node A: accept channel
		let accept_channel_msg = node_b_chan.accept_inbound_channel();
		node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();

		// Node A --> Node B: funding created
		let output_script = node_a_chan.context.get_funding_redeemscript();
		let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
			value: 10000000, script_pubkey: output_script.clone(),
		}]};
		let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
		let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
		let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();

		// Node B --> Node A: funding signed
		let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
		let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };

		// Now disconnect the two nodes and check that the commitment point in
		// Node B's channel_reestablish message is sane.
		assert!(node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
		let msg = node_b_chan.get_channel_reestablish(&&logger);
		assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
		assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
		assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);

		// Check that the commitment point in Node A's channel_reestablish message
		// is sane.
		assert!(node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
		let msg = node_a_chan.get_channel_reestablish(&&logger);
		assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
		assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
		assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
	}

	#[test]
	fn test_configured_holder_max_htlc_value_in_flight() {
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15000 });
		let logger = test_utils::TestLogger::new();
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
		let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());

		let mut config_2_percent = UserConfig::default();
		config_2_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 2;
		let mut config_99_percent = UserConfig::default();
		config_99_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 99;
		let mut config_0_percent = UserConfig::default();
		config_0_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 0;
		let mut config_101_percent = UserConfig::default();
		config_101_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 101;

		// Test that `OutboundV1Channel::new` creates a channel with the correct value for
		// `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
		// which is set to the lower bound + 1 (2%) of the `channel_value`.
		let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42, None).unwrap();
		let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000;
		assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64);
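		// (For this 10_000_000 sat channel: 10_000_000_000 msat * 2% = 200_000_000 msat.)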
		// Test with the upper bound - 1 of valid values (99%).
		let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42, None).unwrap();
		let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000;
		assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);

		let chan_1_open_channel_msg = chan_1.get_open_channel(ChainHash::using_genesis_block(network));

		// Test that `InboundV1Channel::new` creates a channel with the correct value for
		// `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
		// which is set to the lower bound + 1 (2%) of the `channel_value`.
		let chan_3 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
		let chan_3_value_msat = chan_3.context.channel_value_satoshis * 1000;
		assert_eq!(chan_3.context.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64);

		// Test with the upper bound - 1 of valid values (99%).
		let chan_4 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
		let chan_4_value_msat = chan_4.context.channel_value_satoshis * 1000;
		assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64);

		// Test that `OutboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
		// if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
		let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42, None).unwrap();
		let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000;
		assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64);

		// Test that `OutboundV1Channel::new` uses the upper bound of the configurable percentage values
		// (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
		// than 100.
		let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42, None).unwrap();
		let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000;
		assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);

		// Test that `InboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
		// if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
		let chan_7 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
		let chan_7_value_msat = chan_7.context.channel_value_satoshis * 1000;
		assert_eq!(chan_7.context.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64);

		// Test that `InboundV1Channel::new` uses the upper bound of the configurable percentage values
		// (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
		// than 100.
		let chan_8 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
		let chan_8_value_msat = chan_8.context.channel_value_satoshis * 1000;
		assert_eq!(chan_8.context.holder_max_htlc_value_in_flight_msat, chan_8_value_msat);
	}

	#[test]
	fn test_configured_holder_selected_channel_reserve_satoshis() {

		// Test that `OutboundV1Channel::new` and `InboundV1Channel::new` create a channel with the correct
		// channel reserves, when `their_channel_reserve_proportional_millionths` is configured.
		test_self_and_counterparty_channel_reserve(10_000_000, 0.02, 0.02);

		// Test with valid but unreasonably high channel reserves
		// Requesting and accepting parties have requested for 49%-49% and 60%-30% channel reserve
		test_self_and_counterparty_channel_reserve(10_000_000, 0.49, 0.49);
		test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.30);

		// Test with calculated channel reserve less than lower bound
		// i.e. `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
		test_self_and_counterparty_channel_reserve(100_000, 0.00002, 0.30);
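		// (100_000 sat * 0.002% = 2 sat, below the floor, so the reserve is clamped up to
		// `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.)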
		// Test with invalid channel reserves since the sum of both is greater than or equal
		// to the channel value
		test_self_and_counterparty_channel_reserve(10_000_000, 0.50, 0.50);
		test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.50);
	}

	fn test_self_and_counterparty_channel_reserve(channel_value_satoshis: u64, outbound_selected_channel_reserve_perc: f64, inbound_selected_channel_reserve_perc: f64) {
		let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15_000 });
		let logger = test_utils::TestLogger::new();
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
		let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());

		let mut outbound_node_config = UserConfig::default();
		outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
		let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42, None).unwrap();

		let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
		assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);

		let chan_open_channel_msg = chan.get_open_channel(ChainHash::using_genesis_block(network));
		let mut inbound_node_config = UserConfig::default();
		inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;

		if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 {
			let chan_inbound_node = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false).unwrap();

			let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64);

			assert_eq!(chan_inbound_node.context.holder_selected_channel_reserve_satoshis, expected_inbound_selected_chan_reserve);
			assert_eq!(chan_inbound_node.context.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve);
		} else {
			// Channel negotiation failed
			let result = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false);
			assert!(result.is_err());
		}
	}

	#[test]
	fn channel_update() {
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15000 });
		let logger = test_utils::TestLogger::new();
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let best_block = BestBlock::from_network(network);
		let chain_hash = ChainHash::using_genesis_block(network);
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

		// Create Node A's channel pointing to Node B's pubkey
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

		// Create Node B's channel by receiving Node A's open_channel message
		// Make sure A's dust limit is as we expect.
		let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();

		// Node B --> Node A: accept channel, explicitly setting B's dust limit.
		let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
		accept_channel_msg.dust_limit_satoshis = 546;
		node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
		node_a_chan.context.holder_dust_limit_satoshis = 1560;

		// Node A --> Node B: funding created
		let output_script = node_a_chan.context.get_funding_redeemscript();
		let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
			value: 10000000, script_pubkey: output_script.clone(),
		}]};
		let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
		let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
		let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();

		// Node B --> Node A: funding signed
		let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
		let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };

		// Make sure that receiving a channel update will update the Channel as expected.
		let update = ChannelUpdate {
			contents: UnsignedChannelUpdate {
				chain_hash,
				short_channel_id: 0,
				timestamp: 0,
				flags: 0,
				cltv_expiry_delta: 100,
				htlc_minimum_msat: 5,
				htlc_maximum_msat: MAX_VALUE_MSAT,
				fee_base_msat: 110,
				fee_proportional_millionths: 11,
				excess_data: Vec::new(),
			},
			signature: Signature::from(unsafe { FFISignature::new() })
		};
		assert!(node_a_chan.channel_update(&update).unwrap());
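		// `channel_update` returns `Ok(true)` here because the update changed our stored
		// counterparty forwarding info.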

		// The counterparty can send an update with a higher minimum HTLC, but that shouldn't
		// change our official htlc_minimum_msat.
		assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1);
		match node_a_chan.context.counterparty_forwarding_info() {
			Some(info) => {
				assert_eq!(info.cltv_expiry_delta, 100);
				assert_eq!(info.fee_base_msat, 110);
				assert_eq!(info.fee_proportional_millionths, 11);
			},
			None => panic!("expected counterparty forwarding info to be Some")
		}

		assert!(!node_a_chan.channel_update(&update).unwrap());
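		// Applying the identical update a second time changes nothing, so `channel_update`
		// reports no change.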
	}

	#[test]
	fn blinding_point_skimmed_fee_malformed_ser() {
		// Ensure that channel blinding points, skimmed fees, and malformed HTLCs are
		// (de)serialized properly.
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15000 });
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let features = channelmanager::provided_init_features(&config);
		let outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None).unwrap();
		let mut chan = Channel { context: outbound_chan.context };

		let dummy_htlc_source = HTLCSource::OutboundRoute {
			path: Path {
				hops: vec![RouteHop {
					pubkey: test_utils::pubkey(2), channel_features: ChannelFeatures::empty(),
					node_features: NodeFeatures::empty(), short_channel_id: 0, fee_msat: 0,
					cltv_expiry_delta: 0, maybe_announced_channel: false,
				}],
				blinded_tail: None
			},
			session_priv: test_utils::privkey(42),
			first_hop_htlc_msat: 0,
			payment_id: PaymentId([42; 32]),
		};
		let dummy_outbound_output = OutboundHTLCOutput {
			htlc_id: 0,
			amount_msat: 0,
			payment_hash: PaymentHash([43; 32]),
			cltv_expiry: 0,
			state: OutboundHTLCState::Committed,
			source: dummy_htlc_source.clone(),
			skimmed_fee_msat: None,
			blinding_point: None,
		};
		let mut pending_outbound_htlcs = vec![dummy_outbound_output.clone(); 10];
		for (idx, htlc) in pending_outbound_htlcs.iter_mut().enumerate() {
			if idx % 2 == 0 {
				htlc.blinding_point = Some(test_utils::pubkey(42 + idx as u8));
			}
			if idx % 3 == 0 {
				htlc.skimmed_fee_msat = Some(1);
			}
		}
		chan.context.pending_outbound_htlcs = pending_outbound_htlcs.clone();
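		// Mixing `Some` and `None` blinding points and skimmed fees across the ten HTLCs
		// exercises both the present and absent serialization paths for the optional fields.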

		let dummy_holding_cell_add_htlc = HTLCUpdateAwaitingACK::AddHTLC {
			amount_msat: 0,
			cltv_expiry: 0,
			payment_hash: PaymentHash([43; 32]),
			source: dummy_htlc_source.clone(),
			onion_routing_packet: msgs::OnionPacket {
				version: 0,
				public_key: Ok(test_utils::pubkey(1)),
				hop_data: [0; 20*65],
				hmac: [0; 32],
			},
			skimmed_fee_msat: None,
			blinding_point: None,
		};
		let dummy_holding_cell_claim_htlc = HTLCUpdateAwaitingACK::ClaimHTLC {
			payment_preimage: PaymentPreimage([42; 32]),
			htlc_id: 0,
		};
		let dummy_holding_cell_failed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailHTLC {
			htlc_id, err_packet: msgs::OnionErrorPacket { data: vec![42] }
		};
		let dummy_holding_cell_malformed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailMalformedHTLC {
			htlc_id, failure_code: INVALID_ONION_BLINDING, sha256_of_onion: [0; 32],
		};
		let mut holding_cell_htlc_updates = Vec::with_capacity(12);
		for i in 0..12 {
			if i % 5 == 0 {
				holding_cell_htlc_updates.push(dummy_holding_cell_add_htlc.clone());
			} else if i % 5 == 1 {
				holding_cell_htlc_updates.push(dummy_holding_cell_claim_htlc.clone());
			} else if i % 5 == 2 {
				let mut dummy_add = dummy_holding_cell_add_htlc.clone();
				if let HTLCUpdateAwaitingACK::AddHTLC {
					ref mut blinding_point, ref mut skimmed_fee_msat, ..
				} = &mut dummy_add {
					*blinding_point = Some(test_utils::pubkey(42 + i));
					*skimmed_fee_msat = Some(42);
				} else { panic!() }
				holding_cell_htlc_updates.push(dummy_add);
			} else if i % 5 == 3 {
				holding_cell_htlc_updates.push(dummy_holding_cell_malformed_htlc(i as u64));
			} else {
				holding_cell_htlc_updates.push(dummy_holding_cell_failed_htlc(i as u64));
			}
		}
		chan.context.holding_cell_htlc_updates = holding_cell_htlc_updates.clone();

		// Encode and decode the channel and ensure that the HTLCs within are the same.
		let encoded_chan = chan.encode();
		let mut s = crate::io::Cursor::new(&encoded_chan);
		let mut reader = crate::util::ser::FixedLengthReader::new(&mut s, encoded_chan.len() as u64);
		let features = channelmanager::provided_channel_type_features(&config);
		let decoded_chan = Channel::read(&mut reader, (&&keys_provider, &&keys_provider, 0, &features)).unwrap();
		assert_eq!(decoded_chan.context.pending_outbound_htlcs, pending_outbound_htlcs);
		assert_eq!(decoded_chan.context.holding_cell_htlc_updates, holding_cell_htlc_updates);
	}

	#[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
	#[test]
	fn outbound_commitment_test() {
		use bitcoin::sighash;
		use bitcoin::consensus::encode::serialize;
		use bitcoin::sighash::EcdsaSighashType;
		use bitcoin::hashes::hex::FromHex;
		use bitcoin::hash_types::Txid;
		use bitcoin::secp256k1::Message;
		use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, ecdsa::EcdsaChannelSigner};
		use crate::ln::PaymentPreimage;
		use crate::ln::channel::{HTLCOutputInCommitment, TxCreationKeys};
		use crate::ln::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint};
		use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
		use crate::util::logger::Logger;
		use crate::sync::Arc;
		use core::str::FromStr;
		use hex::DisplayHex;

		// Test vectors from BOLT 3 Appendices C and F (anchors):
		let feeest = TestFeeEstimator { fee_est: 15000 };
		let logger : Arc<dyn Logger> = Arc::new(test_utils::TestLogger::new());
		let secp_ctx = Secp256k1::new();

		let mut signer = InMemorySigner::new(
			&secp_ctx,
			SecretKey::from_slice(&<Vec<u8>>::from_hex("30ff4956bbdd3222d44cc5e8a1261dab1e07957bdac5ae88fe3261ef321f3749").unwrap()[..]).unwrap(),
			SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
			SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
			SecretKey::from_slice(&<Vec<u8>>::from_hex("3333333333333333333333333333333333333333333333333333333333333333").unwrap()[..]).unwrap(),
			SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),

			// These aren't set in the test vectors:
			[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff],
			10_000_000,
			[0; 32],
			[0; 32],
		);

		assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..],
				<Vec<u8>>::from_hex("023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb").unwrap()[..]);
		let keys_provider = Keys { signer: signer.clone() };

		let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let mut config = UserConfig::default();
		config.channel_handshake_config.announced_channel = false;
		let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42, None).unwrap(); // Nothing uses their network key in this test
		chan.context.holder_dust_limit_satoshis = 546;
		chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in by accept_channel

		let funding_info = OutPoint{ txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };

		let counterparty_pubkeys = ChannelPublicKeys {
			funding_pubkey: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
			revocation_basepoint: RevocationBasepoint::from(PublicKey::from_slice(&<Vec<u8>>::from_hex("02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27").unwrap()[..]).unwrap()),
			payment_point: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"),
			delayed_payment_basepoint: DelayedPaymentBasepoint::from(public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13")),
			htlc_basepoint: HtlcBasepoint::from(public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"))
		};
		chan.context.channel_transaction_parameters.counterparty_parameters = Some(
			CounterpartyChannelTransactionParameters {
				pubkeys: counterparty_pubkeys.clone(),
				selected_contest_delay: 144
			});
		chan.context.channel_transaction_parameters.funding_outpoint = Some(funding_info);
		signer.provide_channel_parameters(&chan.context.channel_transaction_parameters);

		assert_eq!(counterparty_pubkeys.payment_point.serialize()[..],
				<Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);

		assert_eq!(counterparty_pubkeys.funding_pubkey.serialize()[..],
				<Vec<u8>>::from_hex("030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c1").unwrap()[..]);

		assert_eq!(counterparty_pubkeys.htlc_basepoint.to_public_key().serialize()[..],
				<Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);

		// We can't just use build_holder_transaction_keys here as the per_commitment_secret is not
		// derived from a commitment_seed, so instead we copy it here and call
		// build_commitment_transaction.
		let delayed_payment_base = &chan.context.holder_signer.as_ref().pubkeys().delayed_payment_basepoint;
		let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
		let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
		let htlc_basepoint = &chan.context.holder_signer.as_ref().pubkeys().htlc_basepoint;
		let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint);

		macro_rules! test_commitment {
			( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
				chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::only_static_remote_key();
				test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::only_static_remote_key(), $($remain)*);
			};
		}

		macro_rules! test_commitment_with_anchors {
			( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
				chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
				test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), $($remain)*);
			};
		}

		macro_rules! test_commitment_common {
			( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $opt_anchors: expr, {
				$( { $htlc_idx: expr, $counterparty_htlc_sig_hex: expr, $htlc_sig_hex: expr, $htlc_tx_hex: expr } ), *
			} ) => { {
				let (commitment_tx, htlcs): (_, Vec<HTLCOutputInCommitment>) = {
					let mut commitment_stats = chan.context.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, &logger);

					let htlcs = commitment_stats.htlcs_included.drain(..)
						.filter_map(|(htlc, _)| if htlc.transaction_output_index.is_some() { Some(htlc) } else { None })
						.collect();
					(commitment_stats.tx, htlcs)
				};
				let trusted_tx = commitment_tx.trust();
				let unsigned_tx = trusted_tx.built_transaction();
				let redeemscript = chan.context.get_funding_redeemscript();
				let counterparty_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_sig_hex).unwrap()[..]).unwrap();
				let sighash = unsigned_tx.get_sighash_all(&redeemscript, chan.context.channel_value_satoshis);
				log_trace!(logger, "unsigned_tx = {}", serialize(&unsigned_tx.transaction).as_hex());
				assert!(secp_ctx.verify_ecdsa(&sighash, &counterparty_signature, chan.context.counterparty_funding_pubkey()).is_ok(), "verify counterparty commitment sig");

				let mut per_htlc: Vec<(HTLCOutputInCommitment, Option<Signature>)> = Vec::new();
				per_htlc.clear(); // Don't warn about excess mut for no-HTLC calls
				let mut counterparty_htlc_sigs = Vec::new();
				counterparty_htlc_sigs.clear(); // Don't warn about excess mut for no-HTLC calls
				$({
					let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
					per_htlc.push((htlcs[$htlc_idx].clone(), Some(remote_signature)));
					counterparty_htlc_sigs.push(remote_signature);
				})*
				assert_eq!(htlcs.len(), per_htlc.len());

				let holder_commitment_tx = HolderCommitmentTransaction::new(
					commitment_tx.clone(),
					counterparty_signature,
					counterparty_htlc_sigs,
					&chan.context.holder_signer.as_ref().pubkeys().funding_pubkey,
					chan.context.counterparty_funding_pubkey()
				);
				let holder_sig = signer.sign_holder_commitment(&holder_commitment_tx, &secp_ctx).unwrap();
				assert_eq!(Signature::from_der(&<Vec<u8>>::from_hex($sig_hex).unwrap()[..]).unwrap(), holder_sig, "holder_sig");

				let funding_redeemscript = chan.context.get_funding_redeemscript();
				let tx = holder_commitment_tx.add_holder_sig(&funding_redeemscript, holder_sig);
				assert_eq!(serialize(&tx)[..], <Vec<u8>>::from_hex($tx_hex).unwrap()[..], "tx");

				// ((htlc, counterparty_sig), (index, holder_sig))
				let mut htlc_counterparty_sig_iter = holder_commitment_tx.counterparty_htlc_sigs.iter();

				$({
					log_trace!(logger, "verifying htlc {}", $htlc_idx);
					let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();

					let ref htlc = htlcs[$htlc_idx];
					let mut htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw,
						chan.context.get_counterparty_selected_contest_delay().unwrap(),
						&htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
					let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
					let htlc_sighashtype = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
					let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
					assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key.to_public_key()).is_ok(), "verify counterparty htlc sig");

					let mut preimage: Option<PaymentPreimage> = None;
					if !htlc.offered {
						for i in 0..5 {
							let out = PaymentHash(Sha256::hash(&[i; 32]).to_byte_array());
							if out == htlc.payment_hash {
								preimage = Some(PaymentPreimage([i; 32]));
							}
						}

						assert!(preimage.is_some());
					}

					let htlc_counterparty_sig = htlc_counterparty_sig_iter.next().unwrap();
					let htlc_holder_sig = signer.sign_holder_htlc_transaction(&htlc_tx, 0, &HTLCDescriptor {
						channel_derivation_parameters: ChannelDerivationParameters {
							value_satoshis: chan.context.channel_value_satoshis,
							keys_id: chan.context.channel_keys_id,
							transaction_parameters: chan.context.channel_transaction_parameters.clone(),
						},
						commitment_txid: trusted_tx.txid(),
						per_commitment_number: trusted_tx.commitment_number(),
						per_commitment_point: trusted_tx.per_commitment_point(),
						feerate_per_kw: trusted_tx.feerate_per_kw(),
						htlc: htlc.clone(),
						preimage: preimage.clone(),
						counterparty_sig: *htlc_counterparty_sig,
					}, &secp_ctx).unwrap();
					let num_anchors = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { 2 } else { 0 };
					assert_eq!(htlc.transaction_output_index, Some($htlc_idx + num_anchors), "output index");

					let signature = Signature::from_der(&<Vec<u8>>::from_hex($htlc_sig_hex).unwrap()[..]).unwrap();
					assert_eq!(signature, htlc_holder_sig, "htlc sig");
					let trusted_tx = holder_commitment_tx.trust();
					htlc_tx.input[0].witness = trusted_tx.build_htlc_input_witness($htlc_idx, htlc_counterparty_sig, &htlc_holder_sig, &preimage);
					log_trace!(logger, "htlc_tx = {}", serialize(&htlc_tx).as_hex());
					assert_eq!(serialize(&htlc_tx)[..], <Vec<u8>>::from_hex($htlc_tx_hex).unwrap()[..], "htlc tx");
				})*
				assert!(htlc_counterparty_sig_iter.next().is_none());
			} }
		}

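		// Each `test_commitment!`/`test_commitment_with_anchors!` call below supplies the
		// counterparty's funding signature, our expected funding signature, and the expected
		// serialized commitment tx, followed by `{ index, counterparty HTLC sig, holder HTLC
		// sig, HTLC tx hex }` entries for every non-dust HTLC.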
		// anchors: simple commitment tx with no HTLCs and single anchor
		test_commitment_with_anchors!("30440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a8658",
			"3044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778",
			"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f10529800000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778014730440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a865801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});

		// simple commitment tx with no HTLCs
		chan.context.value_to_self_msat = 7000000000;

		test_commitment!("3045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b0",
			"30440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae055647142",
			"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48454a56a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae05564714201483045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});

		// anchors: simple commitment tx with no HTLCs
		test_commitment_with_anchors!("3045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f3",
			"30450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f7",
			"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a508b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f701483045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});

		chan.context.pending_inbound_htlcs.push({
			let mut out = InboundHTLCOutput{
				htlc_id: 0,
				amount_msat: 1000000,
				cltv_expiry: 500,
				payment_hash: PaymentHash([0; 32]),
				state: InboundHTLCState::Committed,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap()).to_byte_array();
			out
		});
		chan.context.pending_inbound_htlcs.push({
			let mut out = InboundHTLCOutput{
				htlc_id: 1,
				amount_msat: 2000000,
				cltv_expiry: 501,
				payment_hash: PaymentHash([0; 32]),
				state: InboundHTLCState::Committed,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
			out
		});
		chan.context.pending_outbound_htlcs.push({
			let mut out = OutboundHTLCOutput{
				htlc_id: 2,
				amount_msat: 2000000,
				cltv_expiry: 502,
				payment_hash: PaymentHash([0; 32]),
				state: OutboundHTLCState::Committed,
				source: HTLCSource::dummy(),
				skimmed_fee_msat: None,
				blinding_point: None,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).to_byte_array();
			out
		});
		chan.context.pending_outbound_htlcs.push({
			let mut out = OutboundHTLCOutput{
				htlc_id: 3,
				amount_msat: 3000000,
				cltv_expiry: 503,
				payment_hash: PaymentHash([0; 32]),
				state: OutboundHTLCState::Committed,
				source: HTLCSource::dummy(),
				skimmed_fee_msat: None,
				blinding_point: None,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).to_byte_array();
			out
		});
		chan.context.pending_inbound_htlcs.push({
			let mut out = InboundHTLCOutput{
				htlc_id: 4,
				amount_msat: 4000000,
				cltv_expiry: 504,
				payment_hash: PaymentHash([0; 32]),
				state: InboundHTLCState::Committed,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0404040404040404040404040404040404040404040404040404040404040404").unwrap()).to_byte_array();
			out
		});

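		// These five HTLCs (1-4M msat, expiries 500-504) are the standing HTLC set used by the
		// BOLT 3 Appendix C test vectors.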
		// commitment tx with all five HTLCs untrimmed (minimum feerate)
		chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
		chan.context.feerate_per_kw = 0;

		test_commitment!("3044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e5",
			"304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea",
			"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e0a06a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea01473044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

			{ 0,
			"3045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b",
			"30440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce",
			"02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b00000000000000000001e8030000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b014730440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },

			{ 1,
			"30440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f896004",
			"3045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f",
			"02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b01000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f89600401483045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },

			{ 2,
			"30440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d43352",
			"3045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa",
			"02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b02000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d4335201483045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },

			{ 3,
			"304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c1363",
			"304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac7487",
			"02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b03000000000000000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c13630147304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac748701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

			{ 4,
			"3044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b87",
			"3045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95",
			"02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b04000000000000000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b8701483045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
		} );

// commitment tx with seven outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 647;

test_commitment!("3045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee",
"30450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb7",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e09c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb701483045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"30450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f",
"30440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b",
"020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f014730440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },

{ 1,
"304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c",
"30450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f",
"020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0100000000000000000124060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c014830450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },

{ 2,
"30440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c8",
"3045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673",
"020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe020000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c801483045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },

{ 3,
"304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c64",
"3045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee",
"020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe030000000000000000010c0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c6401483045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

{ 4,
"30450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca",
"3044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9",
"020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe04000000000000000001da0d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca01473044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
} );

// commitment tx with six outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 648;

test_commitment!("304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e5",
"3045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e9",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4844e9d6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e90147304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"3045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e0",
"304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec6",
"020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10000000000000000000123060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e00148304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },

{ 1,
"304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d384124",
"3044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae",
"020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10100000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d38412401473044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },

{ 2,
"304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc625532989",
"3045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e2260796",
"020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf1020000000000000000010b0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc62553298901483045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e226079601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

{ 3,
"3044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be",
"3045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6",
"020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf103000000000000000001d90d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be01483045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
} );

// anchors: commitment tx with six outputs untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 645;
chan.context.holder_dust_limit_satoshis = 1001;

test_commitment_with_anchors!("3044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc312",
"3045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d0051",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80084a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837ead007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5eb80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994abc996a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d005101473044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc31201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"3045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc282",
"3045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef09",
"02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320002000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc28283483045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef0901008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000" },

{ 1,
"3045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e",
"3045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac",
"02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320003000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e83483045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },

{ 2,
"3044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c1",
"304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e",
"02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320004000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c18347304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },

{ 3,
"3044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc615",
"3045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226",
"02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320005000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc61583483045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
} );

// commitment tx with six outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2069;
chan.context.holder_dust_limit_satoshis = 546;

test_commitment!("304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc",
"3045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e33",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48477956a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e330148304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"3045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f699",
"3045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d",
"02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0000000000000000000175020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f69901483045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },

{ 1,
"3045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df",
"3045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61",
"02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0100000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df01483045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },

{ 2,
"3045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e0",
"3044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c18",
"02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d020000000000000000015d060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e001473044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c1801008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

{ 3,
"304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df",
"3045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33",
"02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d03000000000000000001f2090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df01483045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
} );

// commitment tx with five outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2070;

test_commitment!("304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c",
"3044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c1",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484da966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c10147304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b",
"30440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e5",
"02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff0000000000000000000174020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b014730440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e501008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },

{ 1,
"3045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546",
"30450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c6",
"02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff010000000000000000015c060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546014830450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

{ 2,
"3045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504",
"30440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502",
"02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff02000000000000000001f1090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504014730440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
} );

// commitment tx with five outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2194;

test_commitment!("304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb3",
"3044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d398",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48440966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d3980147304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"3045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de8450",
"304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e0154301",
"02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de84500148304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e015430101008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },

{ 1,
"3044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a0",
"3045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b77",
"02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd010000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a001483045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b7701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

{ 2,
"3045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b",
"30450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3",
"02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd020000000000000000019a090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b014830450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
} );

// commitment tx with four outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2195;

test_commitment!("304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce403",
"3044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d1767",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484b8976a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d17670147304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce40301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"3045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e",
"3045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d76",
"020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900000000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e01483045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d7601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

{ 1,
"3045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a",
"3044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82",
"020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900100000000000000000199090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a01473044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
} );

// anchors: commitment tx with four outputs untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2185;
chan.context.holder_dust_limit_satoshis = 2001;
let cached_channel_type = chan.context.channel_type.clone();
chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();

test_commitment_with_anchors!("3044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c0",
"3045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd5",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80064a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994b80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ac5916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd501473044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb",
"30440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f",
"02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc02000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb834730440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },

{ 1,
"3045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd7271",
"3045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688",
"02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc03000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd727183483045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
} );

// commitment tx with four outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 3702;
chan.context.holder_dust_limit_satoshis = 546;
chan.context.channel_type = cached_channel_type.clone();

test_commitment!("304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee40169",
"3045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf75",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4846f916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf750148304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee4016901475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb89",
"304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f",
"020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb890147304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

{ 1,
"3044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb5817",
"304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc",
"020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0100000000000000000176050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb58170147304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
} );

// commitment tx with three outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 3703;

test_commitment!("3045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e",
"304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e1",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484eb936a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e101483045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"3045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a",
"3045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05",
"0200000000010120060e4a29579d429f0f27c17ee5f1ee282f20d706d6f90b63d35946d8f3029a0000000000000000000175050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a01483045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
} );

// anchors: commitment tx with three outputs untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 3687;
chan.context.holder_dust_limit_satoshis = 3001;
chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();

test_commitment_with_anchors!("3045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c228377",
"3045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80054a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aa28b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d01483045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c22837701475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"3044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c",
"3045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de",
"02000000000101542562b326c08e3a076d9cfca2be175041366591da334d8d513ff1686fd95a6002000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c83483045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
} );

// commitment tx with three outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 4914;
chan.context.holder_dust_limit_satoshis = 546;
chan.context.channel_type = cached_channel_type.clone();

test_commitment!("3045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c95244",
"3045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c19",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484ae8f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c1901483045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c9524401475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"3045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374",
"30440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10",
"02000000000101a9172908eace869cc35128c31fc2ab502f72e4dff31aab23e0244c4b04b11ab00000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374014730440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
} );

// commitment tx with two outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 4915;
chan.context.holder_dust_limit_satoshis = 546;

test_commitment!("304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a720",
"30450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf5",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484fa926a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf50147304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a72001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});

// anchors: commitment tx with two outputs untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 4894;
chan.context.holder_dust_limit_satoshis = 4001;
chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();

test_commitment_with_anchors!("3045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b95",
"30450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ad0886a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd01483045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b9501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});

// commitment tx with two outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 9651180;
chan.context.holder_dust_limit_satoshis = 546;
chan.context.channel_type = cached_channel_type.clone();

test_commitment!("304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd3",
"3045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b800222020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80ec0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4840400483045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de0147304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});

// commitment tx with one output untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 9651181;

test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
"304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});

// anchors: commitment tx with one output untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 6216010;
chan.context.holder_dust_limit_satoshis = 4001;
chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();

test_commitment_with_anchors!("30450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf",
"30450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a04004830450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1014830450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});

		// commitment tx with fee greater than funder amount
		chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
		chan.context.feerate_per_kw = 9651936;
		chan.context.holder_dust_limit_satoshis = 546;
		chan.context.channel_type = cached_channel_type;

		test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
			"304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
			"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});

		// commitment tx with 3 htlc outputs, 2 offered having the same amount and preimage
		chan.context.value_to_self_msat = 7_000_000_000 - 2_000_000;
		chan.context.feerate_per_kw = 253;
		chan.context.pending_inbound_htlcs.clear();
		chan.context.pending_inbound_htlcs.push({
			let mut out = InboundHTLCOutput{
				htlc_id: 1,
				amount_msat: 2000000,
				cltv_expiry: 501,
				payment_hash: PaymentHash([0; 32]),
				state: InboundHTLCState::Committed,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
			out
		});
		chan.context.pending_outbound_htlcs.clear();
		chan.context.pending_outbound_htlcs.push({
			let mut out = OutboundHTLCOutput{
				htlc_id: 6,
				amount_msat: 5000001,
				cltv_expiry: 506,
				payment_hash: PaymentHash([0; 32]),
				state: OutboundHTLCState::Committed,
				source: HTLCSource::dummy(),
				skimmed_fee_msat: None,
				blinding_point: None,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
			out
		});
		chan.context.pending_outbound_htlcs.push({
			let mut out = OutboundHTLCOutput{
				htlc_id: 5,
				amount_msat: 5000000,
				cltv_expiry: 505,
				payment_hash: PaymentHash([0; 32]),
				state: OutboundHTLCState::Committed,
				source: HTLCSource::dummy(),
				skimmed_fee_msat: None,
				blinding_point: None,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
			out
		});
9629 test_commitment!("304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c8",
9630 "304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c",
9631 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2d8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121b8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121bc0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484a69f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c0147304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9634 "3045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce5",
9635 "3044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b",
9636 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec000000000000000000011f070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce501473044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9638 "3045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a",
9639 "3045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d",
9640 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec01000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a01483045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868f9010000" },
9642 "30440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc",
9643 "304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb",
9644 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec02000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc0147304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" }

		chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
		test_commitment_with_anchors!("3044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c725",
			"3045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b76",
			"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aad9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b7601473044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c72501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

				{ 0,
				"30440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c2",
				"304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424",
				"020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c402000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c28347304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
				{ 1,
				"304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c",
				"304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b",
				"020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40300000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c8347304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568f9010000" },
				{ 2,
				"3045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a1",
				"3045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b",
				"020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40400000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a183483045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568fa010000" }
		} );
	}

	#[test]
	fn test_per_commitment_secret_gen() {
		// Test vectors from BOLT 3 Appendix D:

		let mut seed = [0; 32];
		seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap());
		assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
			<Vec<u8>>::from_hex("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap()[..]);

		seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap());
		assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
			<Vec<u8>>::from_hex("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()[..]);

		assert_eq!(chan_utils::build_commitment_secret(&seed, 0xaaaaaaaaaaa),
			<Vec<u8>>::from_hex("56f4008fb007ca9acf0e15b054d5c9fd12ee06cea347914ddbaed70d1c13a528").unwrap()[..]);

		assert_eq!(chan_utils::build_commitment_secret(&seed, 0x555555555555),
			<Vec<u8>>::from_hex("9015daaeb06dba4ccc05b91b2f73bd54405f2be9f217fbacd3c5ac2e62327d31").unwrap()[..]);

		seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap());
		assert_eq!(chan_utils::build_commitment_secret(&seed, 1),
			<Vec<u8>>::from_hex("915c75942a26bb3a433a8ce2cb0427c29ec6c1775cfc78328b57f6ba7bfeaa9c").unwrap()[..]);
	}

	#[test]
	fn test_key_derivation() {
		// Test vectors from BOLT 3 Appendix E:
		let secp_ctx = Secp256k1::new();

		let base_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f").unwrap()[..]).unwrap();
		let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();

		let base_point = PublicKey::from_secret_key(&secp_ctx, &base_secret);
		assert_eq!(base_point.serialize()[..], <Vec<u8>>::from_hex("036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2").unwrap()[..]);

		let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
		assert_eq!(per_commitment_point.serialize()[..], <Vec<u8>>::from_hex("025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486").unwrap()[..]);

		assert_eq!(chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret),
			SecretKey::from_slice(&<Vec<u8>>::from_hex("cbced912d3b21bf196a766651e436aff192362621ce317704ea2f75d87e7be0f").unwrap()[..]).unwrap());

		assert_eq!(RevocationKey::from_basepoint(&secp_ctx, &RevocationBasepoint::from(base_point), &per_commitment_point).to_public_key().serialize()[..],
			<Vec<u8>>::from_hex("02916e326636d19c33f13e8c0c3a03dd157f332f3e99c317c141dd865eb01f8ff0").unwrap()[..]);

		assert_eq!(chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_secret, &base_secret),
			SecretKey::from_slice(&<Vec<u8>>::from_hex("d09ffff62ddb2297ab000cc85bcb4283fdeb6aa052affbc9dddcf33b61078110").unwrap()[..]).unwrap());
	}

	#[test]
	fn test_zero_conf_channel_type_support() {
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
		let logger = test_utils::TestLogger::new();

		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
			node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

		let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
		channel_type_features.set_zero_conf_required();

		let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
		open_channel_msg.channel_type = Some(channel_type_features);
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let res = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
			node_b_node_id, &channelmanager::provided_channel_type_features(&config),
			&channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false);
		assert!(res.is_ok());
	}

	#[test]
	fn test_supports_anchors_zero_htlc_tx_fee() {
		// Tests that if both sides support and negotiate `anchors_zero_fee_htlc_tx`, it is the
		// resulting `channel_type`.
		let secp_ctx = Secp256k1::new();
		let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
		let logger = test_utils::TestLogger::new();

		let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
		let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

		let mut config = UserConfig::default();
		config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;

		// It is not enough for just the initiator to signal `option_anchors_zero_fee_htlc_tx`;
		// both sides need to signal it.
		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42,
			&config, 0, 42, None
		).unwrap();
		assert!(!channel_a.context.channel_type.supports_anchors_zero_fee_htlc_tx());

		let mut expected_channel_type = ChannelTypeFeatures::empty();
		expected_channel_type.set_static_remote_key_required();
		expected_channel_type.set_anchors_zero_fee_htlc_tx_required();

		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
			None
		).unwrap();

		let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
		let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		).unwrap();

		assert_eq!(channel_a.context.channel_type, expected_channel_type);
		assert_eq!(channel_b.context.channel_type, expected_channel_type);
	}

	#[test]
	fn test_rejects_implicit_simple_anchors() {
		// Tests that if `option_anchors` is being negotiated implicitly through the intersection of
		// each side's `InitFeatures`, it is rejected.
		let secp_ctx = Secp256k1::new();
		let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
		let logger = test_utils::TestLogger::new();

		let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
		let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

		let config = UserConfig::default();

		// See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
		let static_remote_key_required: u64 = 1 << 12;
		let simple_anchors_required: u64 = 1 << 20;
		let raw_init_features = static_remote_key_required | simple_anchors_required;
		let init_features_with_simple_anchors = InitFeatures::from_le_bytes(raw_init_features.to_le_bytes().to_vec());
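		// For reference (BOLT 9): bit 12 is `option_static_remotekey` in its required position
		// and bit 20 is the original `option_anchors` in its required position, i.e. anchor
		// outputs without zero-fee HTLC transactions.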

		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
			None
		).unwrap();

		// Set `channel_type` to `None` to force the implicit feature negotiation.
		let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
		open_channel_msg.channel_type = None;

		// Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
		// `static_remote_key`, it will fail the channel.
		let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &init_features_with_simple_anchors,
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		);
		assert!(channel_b.is_err());
	}

	#[test]
	fn test_rejects_simple_anchors_channel_type() {
		// Tests that if `option_anchors` is being negotiated through the `channel_type` feature,
		// it is rejected.
		let secp_ctx = Secp256k1::new();
		let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
		let logger = test_utils::TestLogger::new();

		let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
		let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

		let config = UserConfig::default();

		// See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
		let static_remote_key_required: u64 = 1 << 12;
		let simple_anchors_required: u64 = 1 << 20;
		let simple_anchors_raw_features = static_remote_key_required | simple_anchors_required;
		let simple_anchors_init = InitFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
		let simple_anchors_channel_type = ChannelTypeFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
		assert!(!simple_anchors_init.requires_unknown_bits());
		assert!(!simple_anchors_channel_type.requires_unknown_bits());

		// First, we'll try to open a channel between A and B where A requests a channel type for
		// the original `option_anchors` feature (non zero fee htlc tx). This should be rejected by
		// B as it's not supported by LDK.
		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
			None
		).unwrap();

		let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
		open_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());

		let res = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &simple_anchors_init,
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		);
		assert!(res.is_err());

		// Then, we'll try to open another channel where A requests a channel type for
		// `anchors_zero_fee_htlc_tx`. B is malicious and tries to downgrade the channel type to the
		// original `option_anchors` feature, which should be rejected by A as it's not supported by
		// LDK.
		let mut channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init,
			10000000, 100000, 42, &config, 0, 42, None
		).unwrap();

		let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));

		let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		).unwrap();

		let mut accept_channel_msg = channel_b.get_accept_channel_message();
		accept_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());

		let res = channel_a.accept_channel(
			&accept_channel_msg, &config.channel_handshake_limits, &simple_anchors_init
		);
		assert!(res.is_err());
	}

	#[test]
	fn test_waiting_for_batch() {
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let logger = test_utils::TestLogger::new();
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let best_block = BestBlock::from_network(network);
		let chain_hash = ChainHash::using_genesis_block(network);
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

		let mut config = UserConfig::default();
		// Set trust_own_funding_0conf while ensuring we don't send channel_ready for a
		// channel in a batch before all channels are ready.
		config.channel_handshake_limits.trust_own_funding_0conf = true;

		// Create a channel from node a to node b that will be part of batch funding.
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(
			&feeest,
			&&keys_provider,
			&&keys_provider,
			node_b_node_id,
			&channelmanager::provided_init_features(&config),
			10000000,
			100000,
			42,
			&config,
			0,
			42,
			None
		).unwrap();

		let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(
			&feeest,
			&&keys_provider,
			&&keys_provider,
			node_b_node_id,
			&channelmanager::provided_channel_type_features(&config),
			&channelmanager::provided_init_features(&config),
			&open_channel_msg,
			7,
			&config,
			0,
			&&logger,
			true,  // Allow node b to send a 0conf channel_ready.
		).unwrap();

		let accept_channel_msg = node_b_chan.accept_inbound_channel();
		node_a_chan.accept_channel(
			&accept_channel_msg,
			&config.channel_handshake_limits,
			&channelmanager::provided_init_features(&config),
		).unwrap();

		// Fund the channel with a batch funding transaction.
		let output_script = node_a_chan.context.get_funding_redeemscript();
		let tx = Transaction {
			version: 2,
			lock_time: LockTime::ZERO,
			input: Vec::new(),
			output: vec![
				TxOut {
					value: 10000000, script_pubkey: output_script.clone(),
				},
				TxOut {
					value: 10000000, script_pubkey: Builder::new().into_script(),
				},
			]};
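		// The second output stands in for another channel funded by the same transaction;
		// together with the `true` (is_batch_funding) passed to `get_funding_created` below,
		// this exercises the WAITING_FOR_BATCH handling asserted further down.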
		let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
		let funding_created_msg = node_a_chan.get_funding_created(
			tx.clone(), funding_outpoint, true, &&logger,
		).map_err(|_| ()).unwrap();
		let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(
			&funding_created_msg.unwrap(),
			best_block,
			&&keys_provider,
			&&logger,
		).map_err(|_| ()).unwrap();
		let node_b_updates = node_b_chan.monitor_updating_restored(
			&&logger,
			&&keys_provider,
			chain_hash,
			&config,
			0,
		);

		// Receive funding_signed, but the channel will be configured to hold sending channel_ready and
		// broadcasting the funding transaction until the batch is ready.
		let res = node_a_chan.funding_signed(
			&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger,
		);
		let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
		let node_a_updates = node_a_chan.monitor_updating_restored(
			&&logger,
			&&keys_provider,
			chain_hash,
			&config,
			0,
		);
		// Our channel_ready shouldn't be sent yet, even with trust_own_funding_0conf set,
		// as the funding transaction depends on all channels in the batch becoming ready.
		assert!(node_a_updates.channel_ready.is_none());
		assert!(node_a_updates.funding_broadcastable.is_none());
		assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));

		// It is possible to receive a 0conf channel_ready from the remote node.
		node_a_chan.channel_ready(
			&node_b_updates.channel_ready.unwrap(),
			&&keys_provider,
			chain_hash,
			&config,
			&best_block,
			&&logger,
		).unwrap();
		assert_eq!(
			node_a_chan.context.channel_state,
			ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH | AwaitingChannelReadyFlags::THEIR_CHANNEL_READY)
		);
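		// Note that `AwaitingChannelReadyFlags` is a bit set: THEIR_CHANNEL_READY is recorded
		// alongside WAITING_FOR_BATCH rather than replacing it, and only the explicit
		// set_batch_ready() call below clears the batch flag.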

		// Clear the ChannelState::WaitingForBatch only when called by ChannelManager.
		node_a_chan.set_batch_ready();
		assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY));
		assert!(node_a_chan.check_get_channel_ready(0).is_some());