1 // This file is Copyright its original authors, visible in version control
4 // This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
5 // or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
6 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
7 // You may not use this file except in accordance with one or both of these
10 use bitcoin::blockdata::constants::ChainHash;
11 use bitcoin::blockdata::script::{Script, ScriptBuf, Builder};
12 use bitcoin::blockdata::transaction::Transaction;
14 use bitcoin::sighash::EcdsaSighashType;
15 use bitcoin::consensus::encode;
17 use bitcoin::hashes::Hash;
18 use bitcoin::hashes::sha256::Hash as Sha256;
19 use bitcoin::hashes::sha256d::Hash as Sha256d;
20 use bitcoin::hash_types::{Txid, BlockHash};
22 use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
23 use bitcoin::secp256k1::{PublicKey,SecretKey};
24 use bitcoin::secp256k1::{Secp256k1,ecdsa::Signature};
25 use bitcoin::secp256k1;
27 use crate::ln::{ChannelId, PaymentPreimage, PaymentHash};
28 use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
30 use crate::ln::msgs::DecodeError;
31 use crate::ln::script::{self, ShutdownScript};
32 use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT, ChannelShutdownState};
33 use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
34 use crate::ln::chan_utils;
35 use crate::ln::onion_utils::HTLCFailReason;
36 use crate::chain::BestBlock;
37 use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
38 use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
39 use crate::chain::transaction::{OutPoint, TransactionData};
40 use crate::sign::ecdsa::{EcdsaChannelSigner, WriteableEcdsaChannelSigner};
41 use crate::sign::{EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
42 use crate::events::ClosureReason;
43 use crate::routing::gossip::NodeId;
44 use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
45 use crate::util::logger::{Logger, Record, WithContext};
46 use crate::util::errors::APIError;
47 use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure};
48 use crate::util::scid_utils::scid_from_parts;
51 use crate::prelude::*;
52 use core::{cmp,mem,fmt};
53 use core::convert::TryInto;
55 #[cfg(any(test, fuzzing, debug_assertions))]
56 use crate::sync::Mutex;
57 use crate::sign::type_resolver::ChannelSignerType;
59 use super::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint, RevocationBasepoint};
62 pub struct ChannelValueStat {
63 pub value_to_self_msat: u64,
64 pub channel_value_msat: u64,
65 pub channel_reserve_msat: u64,
66 pub pending_outbound_htlcs_amount_msat: u64,
67 pub pending_inbound_htlcs_amount_msat: u64,
68 pub holding_cell_outbound_amount_msat: u64,
69 pub counterparty_max_htlc_value_in_flight_msat: u64, // outgoing
70 pub counterparty_dust_limit_msat: u64,
73 pub struct AvailableBalances {
74 /// The amount that would go to us if we close the channel, ignoring any on-chain fees.
75 pub balance_msat: u64,
76 /// Total amount available for our counterparty to send to us.
77 pub inbound_capacity_msat: u64,
78 /// Total amount available for us to send to our counterparty.
79 pub outbound_capacity_msat: u64,
80 /// The maximum value we can assign to the next outbound HTLC
81 pub next_outbound_htlc_limit_msat: u64,
82 /// The minimum value we can assign to the next outbound HTLC
83 pub next_outbound_htlc_minimum_msat: u64,
86 #[derive(Debug, Clone, Copy, PartialEq)]
88 // Inbound states mirroring InboundHTLCState
90 AwaitingRemoteRevokeToAnnounce,
91 // Note that we do not have a AwaitingAnnouncedRemoteRevoke variant here as it is universally
92 // handled the same as `Committed`, with the only exception in `InboundHTLCState` being the
93 // distinction of when we allow ourselves to forward the HTLC. Because we aren't "forwarding"
94 // the fee update anywhere, we can simply consider the fee update `Committed` immediately
95 // instead of setting it to AwaitingAnnouncedRemoteRevoke.
97 // Outbound state can only be `LocalAnnounced` or `Committed`
101 enum InboundHTLCRemovalReason {
102 FailRelay(msgs::OnionErrorPacket),
103 FailMalformed(([u8; 32], u16)),
104 Fulfill(PaymentPreimage),
107 enum InboundHTLCState {
108 /// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an
109 /// update_add_htlc message for this HTLC.
110 RemoteAnnounced(PendingHTLCStatus),
111 /// Included in a received commitment_signed message (implying we've
112 /// revoke_and_ack'd it), but the remote hasn't yet revoked their previous
113 /// state (see the example below). We have not yet included this HTLC in a
114 /// commitment_signed message because we are waiting on the remote's
115 /// aforementioned state revocation. One reason this missing remote RAA
116 /// (revoke_and_ack) blocks us from constructing a commitment_signed message
117 /// is because every time we create a new "state", i.e. every time we sign a
118 /// new commitment tx (see [BOLT #2]), we need a new per_commitment_point,
119 /// which are provided one-at-a-time in each RAA. E.g., the last RAA they
120 /// sent provided the per_commitment_point for our current commitment tx.
121 /// The other reason we should not send a commitment_signed without their RAA
122 /// is because their RAA serves to ACK our previous commitment_signed.
124 /// Here's an example of how an HTLC could come to be in this state:
125 /// remote --> update_add_htlc(prev_htlc) --> local
126 /// remote --> commitment_signed(prev_htlc) --> local
127 /// remote <-- revoke_and_ack <-- local
128 /// remote <-- commitment_signed(prev_htlc) <-- local
129 /// [note that here, the remote does not respond with a RAA]
130 /// remote --> update_add_htlc(this_htlc) --> local
131 /// remote --> commitment_signed(prev_htlc, this_htlc) --> local
132 /// Now `this_htlc` will be assigned this state. It's unable to be officially
133 /// accepted, i.e. included in a commitment_signed, because we're missing the
134 /// RAA that provides our next per_commitment_point. The per_commitment_point
135 /// is used to derive commitment keys, which are used to construct the
136 /// signatures in a commitment_signed message.
137 /// Implies AwaitingRemoteRevoke.
139 /// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
140 AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus),
141 /// Included in a received commitment_signed message (implying we've revoke_and_ack'd it).
142 /// We have also included this HTLC in our latest commitment_signed and are now just waiting
143 /// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the
144 /// channel (before it can then get forwarded and/or removed).
145 /// Implies AwaitingRemoteRevoke.
146 AwaitingAnnouncedRemoteRevoke(PendingHTLCStatus),
148 /// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
149 /// created it we would have put it in the holding cell instead). When they next revoke_and_ack
151 /// Note that we have to keep an eye on the HTLC until we've received a broadcastable
152 /// commitment transaction without it as otherwise we'll have to force-close the channel to
153 /// claim it before the timeout (obviously doesn't apply to revoked HTLCs that we can't claim
154 /// anyway). That said, ChannelMonitor does this for us (see
155 /// ChannelMonitor::should_broadcast_holder_commitment_txn) so we actually remove the HTLC from
156 /// our own local state before then, once we're sure that the next commitment_signed and
157 /// ChannelMonitor::provide_latest_local_commitment_tx will not include this HTLC.
158 LocalRemoved(InboundHTLCRemovalReason),
161 struct InboundHTLCOutput {
165 payment_hash: PaymentHash,
166 state: InboundHTLCState,
169 #[cfg_attr(test, derive(Clone, Debug, PartialEq))]
170 enum OutboundHTLCState {
171 /// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
172 /// created it we would have put it in the holding cell instead). When they next revoke_and_ack
173 /// we will promote to Committed (note that they may not accept it until the next time we
174 /// revoke, but we don't really care about that:
175 /// * they've revoked, so worst case we can announce an old state and get our (option on)
176 /// money back (though we won't), and,
177 /// * we'll send them a revoke when they send a commitment_signed, and since only they're
178 /// allowed to remove it, the "can only be removed once committed on both sides" requirement
179 /// doesn't matter to us and it's up to them to enforce it, worst-case they jump ahead but
180 /// we'll never get out of sync).
181 /// Note that we Box the OnionPacket as it's rather large and we don't want to blow up
182 /// OutboundHTLCOutput's size just for a temporary bit
183 LocalAnnounced(Box<msgs::OnionPacket>),
185 /// Remote removed this (outbound) HTLC. We're waiting on their commitment_signed to finalize
186 /// the change (though they'll need to revoke before we fail the payment).
187 RemoteRemoved(OutboundHTLCOutcome),
188 /// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
189 /// the remote side hasn't yet revoked their previous state, which we need them to do before we
190 /// can do any backwards failing. Implies AwaitingRemoteRevoke.
191 /// We also have not yet removed this HTLC in a commitment_signed message, and are waiting on a
192 /// remote revoke_and_ack on a previous state before we can do so.
193 AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome),
194 /// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
195 /// the remote side hasn't yet revoked their previous state, which we need them to do before we
196 /// can do any backwards failing. Implies AwaitingRemoteRevoke.
197 /// We have removed this HTLC in our latest commitment_signed and are now just waiting on a
198 /// revoke_and_ack to drop completely.
199 AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome),
203 #[cfg_attr(test, derive(Debug, PartialEq))]
204 enum OutboundHTLCOutcome {
205 /// LDK version 0.0.105+ will always fill in the preimage here.
206 Success(Option<PaymentPreimage>),
207 Failure(HTLCFailReason),
210 impl From<Option<HTLCFailReason>> for OutboundHTLCOutcome {
211 fn from(o: Option<HTLCFailReason>) -> Self {
213 None => OutboundHTLCOutcome::Success(None),
214 Some(r) => OutboundHTLCOutcome::Failure(r)
219 impl<'a> Into<Option<&'a HTLCFailReason>> for &'a OutboundHTLCOutcome {
220 fn into(self) -> Option<&'a HTLCFailReason> {
222 OutboundHTLCOutcome::Success(_) => None,
223 OutboundHTLCOutcome::Failure(ref r) => Some(r)
228 #[cfg_attr(test, derive(Clone, Debug, PartialEq))]
229 struct OutboundHTLCOutput {
233 payment_hash: PaymentHash,
234 state: OutboundHTLCState,
236 blinding_point: Option<PublicKey>,
237 skimmed_fee_msat: Option<u64>,
240 /// See AwaitingRemoteRevoke ChannelState for more info
241 #[cfg_attr(test, derive(Clone, Debug, PartialEq))]
242 enum HTLCUpdateAwaitingACK {
243 AddHTLC { // TODO: Time out if we're getting close to cltv_expiry
247 payment_hash: PaymentHash,
249 onion_routing_packet: msgs::OnionPacket,
250 // The extra fee we're skimming off the top of this HTLC.
251 skimmed_fee_msat: Option<u64>,
252 blinding_point: Option<PublicKey>,
255 payment_preimage: PaymentPreimage,
260 err_packet: msgs::OnionErrorPacket,
265 sha256_of_onion: [u8; 32],
269 macro_rules! define_state_flags {
270 ($flag_type_doc: expr, $flag_type: ident, [$(($flag_doc: expr, $flag: ident, $value: expr)),+], $extra_flags: expr) => {
271 #[doc = $flag_type_doc]
272 #[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
273 struct $flag_type(u32);
278 const $flag: $flag_type = $flag_type($value);
281 /// All flags that apply to the specified [`ChannelState`] variant.
283 const ALL: $flag_type = Self($(Self::$flag.0 | )* $extra_flags);
286 fn new() -> Self { Self(0) }
289 fn from_u32(flags: u32) -> Result<Self, ()> {
290 if flags & !Self::ALL.0 != 0 {
293 Ok($flag_type(flags))
298 fn is_empty(&self) -> bool { self.0 == 0 }
301 fn is_set(&self, flag: Self) -> bool { *self & flag == flag }
304 impl core::ops::Not for $flag_type {
306 fn not(self) -> Self::Output { Self(!self.0) }
308 impl core::ops::BitOr for $flag_type {
310 fn bitor(self, rhs: Self) -> Self::Output { Self(self.0 | rhs.0) }
312 impl core::ops::BitOrAssign for $flag_type {
313 fn bitor_assign(&mut self, rhs: Self) { self.0 |= rhs.0; }
315 impl core::ops::BitAnd for $flag_type {
317 fn bitand(self, rhs: Self) -> Self::Output { Self(self.0 & rhs.0) }
319 impl core::ops::BitAndAssign for $flag_type {
320 fn bitand_assign(&mut self, rhs: Self) { self.0 &= rhs.0; }
323 ($flag_type_doc: expr, $flag_type: ident, $flags: tt) => {
324 define_state_flags!($flag_type_doc, $flag_type, $flags, 0);
326 ($flag_type_doc: expr, FUNDED_STATE, $flag_type: ident, $flags: tt) => {
327 define_state_flags!($flag_type_doc, $flag_type, $flags, FundedStateFlags::ALL.0);
328 impl core::ops::BitOr<FundedStateFlags> for $flag_type {
330 fn bitor(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 | rhs.0) }
332 impl core::ops::BitOrAssign<FundedStateFlags> for $flag_type {
333 fn bitor_assign(&mut self, rhs: FundedStateFlags) { self.0 |= rhs.0; }
335 impl core::ops::BitAnd<FundedStateFlags> for $flag_type {
337 fn bitand(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 & rhs.0) }
339 impl core::ops::BitAndAssign<FundedStateFlags> for $flag_type {
340 fn bitand_assign(&mut self, rhs: FundedStateFlags) { self.0 &= rhs.0; }
342 impl PartialEq<FundedStateFlags> for $flag_type {
343 fn eq(&self, other: &FundedStateFlags) -> bool { self.0 == other.0 }
345 impl From<FundedStateFlags> for $flag_type {
346 fn from(flags: FundedStateFlags) -> Self { Self(flags.0) }
351 /// We declare all the states/flags here together to help determine which bits are still available
354 pub const OUR_INIT_SENT: u32 = 1 << 0;
355 pub const THEIR_INIT_SENT: u32 = 1 << 1;
356 pub const FUNDING_NEGOTIATED: u32 = 1 << 2;
357 pub const AWAITING_CHANNEL_READY: u32 = 1 << 3;
358 pub const THEIR_CHANNEL_READY: u32 = 1 << 4;
359 pub const OUR_CHANNEL_READY: u32 = 1 << 5;
360 pub const CHANNEL_READY: u32 = 1 << 6;
361 pub const PEER_DISCONNECTED: u32 = 1 << 7;
362 pub const MONITOR_UPDATE_IN_PROGRESS: u32 = 1 << 8;
363 pub const AWAITING_REMOTE_REVOKE: u32 = 1 << 9;
364 pub const REMOTE_SHUTDOWN_SENT: u32 = 1 << 10;
365 pub const LOCAL_SHUTDOWN_SENT: u32 = 1 << 11;
366 pub const SHUTDOWN_COMPLETE: u32 = 1 << 12;
367 pub const WAITING_FOR_BATCH: u32 = 1 << 13;
371 "Flags that apply to all [`ChannelState`] variants in which the channel is funded.",
373 ("Indicates the remote side is considered \"disconnected\" and no updates are allowed \
374 until after we've done a `channel_reestablish` dance.", PEER_DISCONNECTED, state_flags::PEER_DISCONNECTED),
375 ("Indicates the user has told us a `ChannelMonitor` update is pending async persistence \
376 somewhere and we should pause sending any outbound messages until they've managed to \
377 complete it.", MONITOR_UPDATE_IN_PROGRESS, state_flags::MONITOR_UPDATE_IN_PROGRESS),
378 ("Indicates we received a `shutdown` message from the remote end. If set, they may not add \
379 any new HTLCs to the channel, and we are expected to respond with our own `shutdown` \
380 message when possible.", REMOTE_SHUTDOWN_SENT, state_flags::REMOTE_SHUTDOWN_SENT),
381 ("Indicates we sent a `shutdown` message. At this point, we may not add any new HTLCs to \
382 the channel.", LOCAL_SHUTDOWN_SENT, state_flags::LOCAL_SHUTDOWN_SENT)
387 "Flags that only apply to [`ChannelState::NegotiatingFunding`].",
388 NegotiatingFundingFlags, [
389 ("Indicates we have (or are prepared to) send our `open_channel`/`accept_channel` message.",
390 OUR_INIT_SENT, state_flags::OUR_INIT_SENT),
391 ("Indicates we have received their `open_channel`/`accept_channel` message.",
392 THEIR_INIT_SENT, state_flags::THEIR_INIT_SENT)
397 "Flags that only apply to [`ChannelState::AwaitingChannelReady`].",
398 FUNDED_STATE, AwaitingChannelReadyFlags, [
399 ("Indicates they sent us a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
400 `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
401 THEIR_CHANNEL_READY, state_flags::THEIR_CHANNEL_READY),
402 ("Indicates we sent them a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
403 `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
404 OUR_CHANNEL_READY, state_flags::OUR_CHANNEL_READY),
405 ("Indicates the channel was funded in a batch and the broadcast of the funding transaction \
406 is being held until all channels in the batch have received `funding_signed` and have \
407 their monitors persisted.", WAITING_FOR_BATCH, state_flags::WAITING_FOR_BATCH)
412 "Flags that only apply to [`ChannelState::ChannelReady`].",
413 FUNDED_STATE, ChannelReadyFlags, [
414 ("Indicates that we have sent a `commitment_signed` but are awaiting the responding \
415 `revoke_and_ack` message. During this period, we can't generate new `commitment_signed` \
416 messages as we'd be unable to determine which HTLCs they included in their `revoke_and_ack` \
417 implicit ACK, so instead we have to hold them away temporarily to be sent later.",
418 AWAITING_REMOTE_REVOKE, state_flags::AWAITING_REMOTE_REVOKE)
422 #[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
424 /// We are negotiating the parameters required for the channel prior to funding it.
425 NegotiatingFunding(NegotiatingFundingFlags),
426 /// We have sent `funding_created` and are awaiting a `funding_signed` to advance to
427 /// `AwaitingChannelReady`. Note that this is nonsense for an inbound channel as we immediately generate
428 /// `funding_signed` upon receipt of `funding_created`, so simply skip this state.
430 /// We've received/sent `funding_created` and `funding_signed` and are thus now waiting on the
431 /// funding transaction to confirm.
432 AwaitingChannelReady(AwaitingChannelReadyFlags),
433 /// Both we and our counterparty consider the funding transaction confirmed and the channel is
435 ChannelReady(ChannelReadyFlags),
436 /// We've successfully negotiated a `closing_signed` dance. At this point, the `ChannelManager`
437 /// is about to drop us, but we store this anyway.
441 macro_rules! impl_state_flag {
442 ($get: ident, $set: ident, $clear: ident, $state_flag: expr, [$($state: ident),+]) => {
444 fn $get(&self) -> bool {
447 ChannelState::$state(flags) => flags.is_set($state_flag.into()),
456 ChannelState::$state(flags) => *flags |= $state_flag,
458 _ => debug_assert!(false, "Attempted to set flag on unexpected ChannelState"),
462 fn $clear(&mut self) {
465 ChannelState::$state(flags) => *flags &= !($state_flag),
467 _ => debug_assert!(false, "Attempted to clear flag on unexpected ChannelState"),
471 ($get: ident, $set: ident, $clear: ident, $state_flag: expr, FUNDED_STATES) => {
472 impl_state_flag!($get, $set, $clear, $state_flag, [AwaitingChannelReady, ChannelReady]);
474 ($get: ident, $set: ident, $clear: ident, $state_flag: expr, $state: ident) => {
475 impl_state_flag!($get, $set, $clear, $state_flag, [$state]);
480 fn from_u32(state: u32) -> Result<Self, ()> {
482 state_flags::FUNDING_NEGOTIATED => Ok(ChannelState::FundingNegotiated),
483 state_flags::SHUTDOWN_COMPLETE => Ok(ChannelState::ShutdownComplete),
485 if val & state_flags::AWAITING_CHANNEL_READY == state_flags::AWAITING_CHANNEL_READY {
486 AwaitingChannelReadyFlags::from_u32(val & !state_flags::AWAITING_CHANNEL_READY)
487 .map(|flags| ChannelState::AwaitingChannelReady(flags))
488 } else if val & state_flags::CHANNEL_READY == state_flags::CHANNEL_READY {
489 ChannelReadyFlags::from_u32(val & !state_flags::CHANNEL_READY)
490 .map(|flags| ChannelState::ChannelReady(flags))
491 } else if let Ok(flags) = NegotiatingFundingFlags::from_u32(val) {
492 Ok(ChannelState::NegotiatingFunding(flags))
500 fn to_u32(&self) -> u32 {
502 ChannelState::NegotiatingFunding(flags) => flags.0,
503 ChannelState::FundingNegotiated => state_flags::FUNDING_NEGOTIATED,
504 ChannelState::AwaitingChannelReady(flags) => state_flags::AWAITING_CHANNEL_READY | flags.0,
505 ChannelState::ChannelReady(flags) => state_flags::CHANNEL_READY | flags.0,
506 ChannelState::ShutdownComplete => state_flags::SHUTDOWN_COMPLETE,
510 fn is_pre_funded_state(&self) -> bool {
511 matches!(self, ChannelState::NegotiatingFunding(_)|ChannelState::FundingNegotiated)
514 fn is_both_sides_shutdown(&self) -> bool {
515 self.is_local_shutdown_sent() && self.is_remote_shutdown_sent()
518 fn with_funded_state_flags_mask(&self) -> FundedStateFlags {
520 ChannelState::AwaitingChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
521 ChannelState::ChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
522 _ => FundedStateFlags::new(),
526 fn should_force_holding_cell(&self) -> bool {
528 ChannelState::ChannelReady(flags) =>
529 flags.is_set(ChannelReadyFlags::AWAITING_REMOTE_REVOKE) ||
530 flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into()) ||
531 flags.is_set(FundedStateFlags::PEER_DISCONNECTED.into()),
533 debug_assert!(false, "The holding cell is only valid within ChannelReady");
539 impl_state_flag!(is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected,
540 FundedStateFlags::PEER_DISCONNECTED, FUNDED_STATES);
541 impl_state_flag!(is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress,
542 FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS, FUNDED_STATES);
543 impl_state_flag!(is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent,
544 FundedStateFlags::LOCAL_SHUTDOWN_SENT, FUNDED_STATES);
545 impl_state_flag!(is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent,
546 FundedStateFlags::REMOTE_SHUTDOWN_SENT, FUNDED_STATES);
547 impl_state_flag!(is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready,
548 AwaitingChannelReadyFlags::OUR_CHANNEL_READY, AwaitingChannelReady);
549 impl_state_flag!(is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready,
550 AwaitingChannelReadyFlags::THEIR_CHANNEL_READY, AwaitingChannelReady);
551 impl_state_flag!(is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch,
552 AwaitingChannelReadyFlags::WAITING_FOR_BATCH, AwaitingChannelReady);
553 impl_state_flag!(is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke,
554 ChannelReadyFlags::AWAITING_REMOTE_REVOKE, ChannelReady);
557 pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
559 pub const DEFAULT_MAX_HTLCS: u16 = 50;
561 pub(crate) fn commitment_tx_base_weight(channel_type_features: &ChannelTypeFeatures) -> u64 {
562 const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
563 const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124;
564 if channel_type_features.supports_anchors_zero_fee_htlc_tx() { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT }
568 const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
570 pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
572 pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;
574 /// The percentage of the channel value `holder_max_htlc_value_in_flight_msat` used to be set to,
575 /// before this was made configurable. The percentage was made configurable in LDK 0.0.107,
576 /// although LDK 0.0.104+ enabled serialization of channels with a different value set for
577 /// `holder_max_htlc_value_in_flight_msat`.
578 pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10;
580 /// Maximum `funding_satoshis` value according to the BOLT #2 specification, if
581 /// `option_support_large_channel` (aka wumbo channels) is not supported.
583 pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;
585 /// Total bitcoin supply in satoshis.
586 pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 1_0000_0000;
588 /// The maximum network dust limit for standard script formats. This currently represents the
589 /// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire
590 /// transaction non-standard and thus refuses to relay it.
591 /// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many
592 /// implementations use this value for their dust limit today.
593 pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546;
595 /// The maximum channel dust limit we will accept from our counterparty.
596 pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS;
598 /// The dust limit is used for both the commitment transaction outputs as well as the closing
599 /// transactions. For cooperative closing transactions, we require segwit outputs, though accept
600 /// *any* segwit scripts, which are allowed to be up to 42 bytes in length.
601 /// In order to avoid having to concern ourselves with standardness during the closing process, we
602 /// simply require our counterparty to use a dust limit which will leave any segwit output
604 /// See <https://github.com/lightning/bolts/issues/905> for more details.
605 pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;
607 // Just a reasonable implementation-specific safe lower bound, higher than the dust limit.
608 pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;
610 /// Used to return a simple Error back to ChannelManager. Will get converted to a
611 /// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
612 /// channel_id in ChannelManager.
613 pub(super) enum ChannelError {
619 impl fmt::Debug for ChannelError {
620 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
622 &ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
623 &ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
624 &ChannelError::Close(ref e) => write!(f, "Close : {}", e),
629 impl fmt::Display for ChannelError {
630 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
632 &ChannelError::Ignore(ref e) => write!(f, "{}", e),
633 &ChannelError::Warn(ref e) => write!(f, "{}", e),
634 &ChannelError::Close(ref e) => write!(f, "{}", e),
639 pub(super) struct WithChannelContext<'a, L: Deref> where L::Target: Logger {
641 pub peer_id: Option<PublicKey>,
642 pub channel_id: Option<ChannelId>,
645 impl<'a, L: Deref> Logger for WithChannelContext<'a, L> where L::Target: Logger {
646 fn log(&self, mut record: Record) {
647 record.peer_id = self.peer_id;
648 record.channel_id = self.channel_id;
649 self.logger.log(record)
653 impl<'a, 'b, L: Deref> WithChannelContext<'a, L>
654 where L::Target: Logger {
655 pub(super) fn from<S: Deref>(logger: &'a L, context: &'b ChannelContext<S>) -> Self
656 where S::Target: SignerProvider
660 peer_id: Some(context.counterparty_node_id),
661 channel_id: Some(context.channel_id),
666 macro_rules! secp_check {
667 ($res: expr, $err: expr) => {
670 Err(_) => return Err(ChannelError::Close($err)),
675 /// The "channel disabled" bit in channel_update must be set based on whether we are connected to
676 /// our counterparty or not. However, we don't want to announce updates right away to avoid
677 /// spamming the network with updates if the connection is flapping. Instead, we "stage" updates to
678 /// our channel_update message and track the current state here.
679 /// See implementation at [`super::channelmanager::ChannelManager::timer_tick_occurred`].
680 #[derive(Clone, Copy, PartialEq)]
681 pub(super) enum ChannelUpdateStatus {
682 /// We've announced the channel as enabled and are connected to our peer.
684 /// Our channel is no longer live, but we haven't announced the channel as disabled yet.
686 /// Our channel is live again, but we haven't announced the channel as enabled yet.
688 /// We've announced the channel as disabled.
692 /// We track when we sent an `AnnouncementSignatures` to our peer in a few states, described here.
694 pub enum AnnouncementSigsState {
695 /// We have not sent our peer an `AnnouncementSignatures` yet, or our peer disconnected since
696 /// we sent the last `AnnouncementSignatures`.
698 /// We sent an `AnnouncementSignatures` to our peer since the last time our peer disconnected.
699 /// This state never appears on disk - instead we write `NotSent`.
701 /// We sent a `CommitmentSigned` after the last `AnnouncementSignatures` we sent. Because we
702 /// only ever have a single `CommitmentSigned` pending at once, if we sent one after sending
703 /// `AnnouncementSignatures` then we know the peer received our `AnnouncementSignatures` if
704 /// they send back a `RevokeAndACK`.
705 /// This state never appears on disk - instead we write `NotSent`.
707 /// We received a `RevokeAndACK`, effectively ack-ing our `AnnouncementSignatures`, at this
708 /// point we no longer need to re-send our `AnnouncementSignatures` again on reconnect.
712 /// An enum indicating whether the local or remote side offered a given HTLC.
718 /// An enum gathering stats on pending HTLCs, either inbound or outbound side.
721 pending_htlcs_value_msat: u64,
722 on_counterparty_tx_dust_exposure_msat: u64,
723 on_holder_tx_dust_exposure_msat: u64,
724 holding_cell_msat: u64,
725 on_holder_tx_holding_cell_htlcs_count: u32, // dust HTLCs *non*-included
728 /// An enum gathering stats on commitment transaction, either local or remote.
729 struct CommitmentStats<'a> {
730 tx: CommitmentTransaction, // the transaction info
731 feerate_per_kw: u32, // the feerate included to build the transaction
732 total_fee_sat: u64, // the total fee included in the transaction
733 num_nondust_htlcs: usize, // the number of HTLC outputs (dust HTLCs *non*-included)
734 htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction
735 local_balance_msat: u64, // local balance before fees *not* considering dust limits
736 remote_balance_msat: u64, // remote balance before fees *not* considering dust limits
737 outbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
738 inbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful received HTLCs since last commitment
741 /// Used when calculating whether we or the remote can afford an additional HTLC.
742 struct HTLCCandidate {
744 origin: HTLCInitiator,
748 fn new(amount_msat: u64, origin: HTLCInitiator) -> Self {
756 /// A return value enum for get_update_fulfill_htlc. See UpdateFulfillCommitFetch variants for
758 enum UpdateFulfillFetch {
760 monitor_update: ChannelMonitorUpdate,
761 htlc_value_msat: u64,
762 msg: Option<msgs::UpdateFulfillHTLC>,
767 /// The return type of get_update_fulfill_htlc_and_commit.
768 pub enum UpdateFulfillCommitFetch {
769 /// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
770 /// it in the holding cell, or re-generated the update_fulfill message after the same claim was
771 /// previously placed in the holding cell (and has since been removed).
773 /// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
774 monitor_update: ChannelMonitorUpdate,
775 /// The value of the HTLC which was claimed, in msat.
776 htlc_value_msat: u64,
778 /// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
779 /// or has been forgotten (presumably previously claimed).
783 /// The return value of `monitor_updating_restored`
784 pub(super) struct MonitorRestoreUpdates {
785 pub raa: Option<msgs::RevokeAndACK>,
786 pub commitment_update: Option<msgs::CommitmentUpdate>,
787 pub order: RAACommitmentOrder,
788 pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
789 pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
790 pub finalized_claimed_htlcs: Vec<HTLCSource>,
791 pub funding_broadcastable: Option<Transaction>,
792 pub channel_ready: Option<msgs::ChannelReady>,
793 pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
796 /// The return value of `signer_maybe_unblocked`
798 pub(super) struct SignerResumeUpdates {
799 pub commitment_update: Option<msgs::CommitmentUpdate>,
800 pub funding_signed: Option<msgs::FundingSigned>,
801 pub channel_ready: Option<msgs::ChannelReady>,
804 /// The return value of `channel_reestablish`
805 pub(super) struct ReestablishResponses {
806 pub channel_ready: Option<msgs::ChannelReady>,
807 pub raa: Option<msgs::RevokeAndACK>,
808 pub commitment_update: Option<msgs::CommitmentUpdate>,
809 pub order: RAACommitmentOrder,
810 pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
811 pub shutdown_msg: Option<msgs::Shutdown>,
814 /// The result of a shutdown that should be handled.
816 pub(crate) struct ShutdownResult {
817 /// A channel monitor update to apply.
818 pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
819 /// A list of dropped outbound HTLCs that can safely be failed backwards immediately.
820 pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
821 /// An unbroadcasted batch funding transaction id. The closure of this channel should be
822 /// propagated to the remainder of the batch.
823 pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
824 pub(crate) channel_id: ChannelId,
825 pub(crate) counterparty_node_id: PublicKey,
828 /// If the majority of the channels funds are to the fundee and the initiator holds only just
829 /// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
830 /// initiator controls the feerate, if they then go to increase the channel fee, they may have no
831 /// balance but the fundee is unable to send a payment as the increase in fee more than drains
832 /// their reserve value. Thus, neither side can send a new HTLC and the channel becomes useless.
833 /// Thus, before sending an HTLC when we are the initiator, we check that the feerate can increase
834 /// by this multiple without hitting this case, before sending.
835 /// This multiple is effectively the maximum feerate "jump" we expect until more HTLCs flow over
836 /// the channel. Sadly, there isn't really a good number for this - if we expect to have no new
837 /// HTLCs for days we may need this to suffice for feerate increases across days, but that may
838 /// leave the channel less usable as we hold a bigger reserve.
839 #[cfg(any(fuzzing, test))]
840 pub const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
841 #[cfg(not(any(fuzzing, test)))]
842 const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
844 /// If we fail to see a funding transaction confirmed on-chain within this many blocks after the
845 /// channel creation on an inbound channel, we simply force-close and move on.
846 /// This constant is the one suggested in BOLT 2.
847 pub(crate) const FUNDING_CONF_DEADLINE_BLOCKS: u32 = 2016;
849 /// In case of a concurrent update_add_htlc proposed by our counterparty, we might
850 /// not have enough balance value remaining to cover the onchain cost of this new
851 /// HTLC weight. If this happens, our counterparty fails the reception of our
852 /// commitment_signed including this new HTLC due to infringement on the channel
854 /// To prevent this case, we compute our outbound update_fee with an HTLC buffer of
855 /// size 2. However, if the number of concurrent update_add_htlc is higher, this still
856 /// leads to a channel force-close. Ultimately, this is an issue coming from the
857 /// design of LN state machines, allowing asynchronous updates.
858 pub(crate) const CONCURRENT_INBOUND_HTLC_FEE_BUFFER: u32 = 2;
860 /// When a channel is opened, we check that the funding amount is enough to pay for relevant
861 /// commitment transaction fees, with at least this many HTLCs present on the commitment
862 /// transaction (not counting the value of the HTLCs themselves).
863 pub(crate) const MIN_AFFORDABLE_HTLC_COUNT: usize = 4;
865 /// When a [`Channel`] has its [`ChannelConfig`] updated, its existing one is stashed for up to this
866 /// number of ticks to allow forwarding HTLCs by nodes that have yet to receive the new
867 /// ChannelUpdate prompted by the config update. This value was determined as follows:
869 /// * The expected interval between ticks (1 minute).
870 /// * The average convergence delay of updates across the network, i.e., ~300 seconds on average
871 /// for a node to see an update as seen on `<https://arxiv.org/pdf/2205.12737.pdf>`.
872 /// * `EXPIRE_PREV_CONFIG_TICKS` = convergence_delay / tick_interval
873 pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;
875 /// The number of ticks that may elapse while we're waiting for a response to a
876 /// [`msgs::RevokeAndACK`] or [`msgs::ChannelReestablish`] message before we attempt to disconnect
879 /// See [`ChannelContext::sent_message_awaiting_response`] for more information.
880 pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;
882 /// The number of ticks that may elapse while we're waiting for an unfunded outbound/inbound channel
883 /// to be promoted to a [`Channel`] since the unfunded channel was created. An unfunded channel
884 /// exceeding this age limit will be force-closed and purged from memory.
885 pub(crate) const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;
887 /// Number of blocks needed for an output from a coinbase transaction to be spendable.
888 pub(crate) const COINBASE_MATURITY: u32 = 100;
890 struct PendingChannelMonitorUpdate {
891 update: ChannelMonitorUpdate,
894 impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
895 (0, update, required),
898 /// The `ChannelPhase` enum describes the current phase in life of a lightning channel with each of
899 /// its variants containing an appropriate channel struct.
900 pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
901 UnfundedOutboundV1(OutboundV1Channel<SP>),
902 UnfundedInboundV1(InboundV1Channel<SP>),
906 impl<'a, SP: Deref> ChannelPhase<SP> where
907 SP::Target: SignerProvider,
908 <SP::Target as SignerProvider>::EcdsaSigner: ChannelSigner,
910 pub fn context(&'a self) -> &'a ChannelContext<SP> {
912 ChannelPhase::Funded(chan) => &chan.context,
913 ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
914 ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
918 pub fn context_mut(&'a mut self) -> &'a mut ChannelContext<SP> {
920 ChannelPhase::Funded(ref mut chan) => &mut chan.context,
921 ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
922 ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
927 /// Contains all state common to unfunded inbound/outbound channels.
928 pub(super) struct UnfundedChannelContext {
929 /// A counter tracking how many ticks have elapsed since this unfunded channel was
930 /// created. If this unfunded channel reaches peer has yet to respond after reaching
931 /// `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS`, it will be force-closed and purged from memory.
933 /// This is so that we don't keep channels around that haven't progressed to a funded state
934 /// in a timely manner.
935 unfunded_channel_age_ticks: usize,
938 impl UnfundedChannelContext {
939 /// Determines whether we should force-close and purge this unfunded channel from memory due to it
940 /// having reached the unfunded channel age limit.
942 /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
943 pub fn should_expire_unfunded_channel(&mut self) -> bool {
944 self.unfunded_channel_age_ticks += 1;
945 self.unfunded_channel_age_ticks >= UNFUNDED_CHANNEL_AGE_LIMIT_TICKS
949 /// Contains everything about the channel including state, and various flags.
950 pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
951 config: LegacyChannelConfig,
953 // Track the previous `ChannelConfig` so that we can continue forwarding HTLCs that were
954 // constructed using it. The second element in the tuple corresponds to the number of ticks that
955 // have elapsed since the update occurred.
956 prev_config: Option<(ChannelConfig, usize)>,
958 inbound_handshake_limits_override: Option<ChannelHandshakeLimits>,
962 /// The current channel ID.
963 channel_id: ChannelId,
964 /// The temporary channel ID used during channel setup. Value kept even after transitioning to a final channel ID.
965 /// Will be `None` for channels created prior to 0.0.115.
966 temporary_channel_id: Option<ChannelId>,
967 channel_state: ChannelState,
969 // When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
970 // our peer. However, we want to make sure they received it, or else rebroadcast it when we
972 // We do so here, see `AnnouncementSigsSent` for more details on the state(s).
973 // Note that a number of our tests were written prior to the behavior here which retransmits
974 // AnnouncementSignatures until after an RAA completes, so the behavior is short-circuited in
976 #[cfg(any(test, feature = "_test_utils"))]
977 pub(crate) announcement_sigs_state: AnnouncementSigsState,
978 #[cfg(not(any(test, feature = "_test_utils")))]
979 announcement_sigs_state: AnnouncementSigsState,
981 secp_ctx: Secp256k1<secp256k1::All>,
982 channel_value_satoshis: u64,
984 latest_monitor_update_id: u64,
986 holder_signer: ChannelSignerType<SP>,
987 shutdown_scriptpubkey: Option<ShutdownScript>,
988 destination_script: ScriptBuf,
990 // Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction
991 // generation start at 0 and count up...this simplifies some parts of implementation at the
992 // cost of others, but should really just be changed.
994 cur_holder_commitment_transaction_number: u64,
995 cur_counterparty_commitment_transaction_number: u64,
996 value_to_self_msat: u64, // Excluding all pending_htlcs, fees, and anchor outputs
997 pending_inbound_htlcs: Vec<InboundHTLCOutput>,
998 pending_outbound_htlcs: Vec<OutboundHTLCOutput>,
999 holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,
1001 /// When resending CS/RAA messages on channel monitor restoration or on reconnect, we always
1002 /// need to ensure we resend them in the order we originally generated them. Note that because
1003 /// there can only ever be one in-flight CS and/or one in-flight RAA at any time, it is
1004 /// sufficient to simply set this to the opposite of any message we are generating as we
1005 /// generate it. ie when we generate a CS, we set this to RAAFirst as, if there is a pending
1006 /// in-flight RAA to resend, it will have been the first thing we generated, and thus we should
1008 resend_order: RAACommitmentOrder,
1010 monitor_pending_channel_ready: bool,
1011 monitor_pending_revoke_and_ack: bool,
1012 monitor_pending_commitment_signed: bool,
1014 // TODO: If a channel is drop'd, we don't know whether the `ChannelMonitor` is ultimately
1015 // responsible for some of the HTLCs here or not - we don't know whether the update in question
1016 // completed or not. We currently ignore these fields entirely when force-closing a channel,
1017 // but need to handle this somehow or we run the risk of losing HTLCs!
1018 monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
1019 monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
1020 monitor_pending_finalized_fulfills: Vec<HTLCSource>,
1022 /// If we went to send a commitment update (ie some messages then [`msgs::CommitmentSigned`])
1023 /// but our signer (initially) refused to give us a signature, we should retry at some point in
1024 /// the future when the signer indicates it may have a signature for us.
1026 /// This flag is set in such a case. Note that we don't need to persist this as we'll end up
1027 /// setting it again as a side-effect of [`Channel::channel_reestablish`].
1028 signer_pending_commitment_update: bool,
1029 /// Similar to [`Self::signer_pending_commitment_update`] but we're waiting to send either a
1030 /// [`msgs::FundingCreated`] or [`msgs::FundingSigned`] depending on if this channel is
1031 /// outbound or inbound.
1032 signer_pending_funding: bool,
1034 // pending_update_fee is filled when sending and receiving update_fee.
1036 // Because it follows the same commitment flow as HTLCs, `FeeUpdateState` is either `Outbound`
1037 // or matches a subset of the `InboundHTLCOutput` variants. It is then updated/used when
1038 // generating new commitment transactions with exactly the same criteria as inbound/outbound
1039 // HTLCs with similar state.
1040 pending_update_fee: Option<(u32, FeeUpdateState)>,
1041 // If a `send_update_fee()` call is made with ChannelState::AwaitingRemoteRevoke set, we place
1042 // it here instead of `pending_update_fee` in the same way as we place outbound HTLC updates in
1043 // `holding_cell_htlc_updates` instead of `pending_outbound_htlcs`. It is released into
1044 // `pending_update_fee` with the same criteria as outbound HTLC updates but can be updated by
1045 // further `send_update_fee` calls, dropping the previous holding cell update entirely.
1046 holding_cell_update_fee: Option<u32>,
1047 next_holder_htlc_id: u64,
1048 next_counterparty_htlc_id: u64,
1049 feerate_per_kw: u32,
1051 /// The timestamp set on our latest `channel_update` message for this channel. It is updated
1052 /// when the channel is updated in ways which may impact the `channel_update` message or when a
1053 /// new block is received, ensuring it's always at least moderately close to the current real
1055 update_time_counter: u32,
1057 #[cfg(debug_assertions)]
1058 /// Max to_local and to_remote outputs in a locally-generated commitment transaction
1059 holder_max_commitment_tx_output: Mutex<(u64, u64)>,
1060 #[cfg(debug_assertions)]
1061 /// Max to_local and to_remote outputs in a remote-generated commitment transaction
1062 counterparty_max_commitment_tx_output: Mutex<(u64, u64)>,
1064 last_sent_closing_fee: Option<(u64, Signature)>, // (fee, holder_sig)
1065 target_closing_feerate_sats_per_kw: Option<u32>,
1067 /// If our counterparty sent us a closing_signed while we were waiting for a `ChannelMonitor`
1068 /// update, we need to delay processing it until later. We do that here by simply storing the
1069 /// closing_signed message and handling it in `maybe_propose_closing_signed`.
1070 pending_counterparty_closing_signed: Option<msgs::ClosingSigned>,
1072 /// The minimum and maximum absolute fee, in satoshis, we are willing to place on the closing
1073 /// transaction. These are set once we reach `closing_negotiation_ready`.
1075 pub(crate) closing_fee_limits: Option<(u64, u64)>,
1077 closing_fee_limits: Option<(u64, u64)>,
1079 /// If we remove an HTLC (or fee update), commit, and receive our counterparty's
1080 /// `revoke_and_ack`, we remove all knowledge of said HTLC (or fee update). However, the latest
1081 /// local commitment transaction that we can broadcast still contains the HTLC (or old fee)
1082 /// until we receive a further `commitment_signed`. Thus we are not eligible for initiating the
1083 /// `closing_signed` negotiation if we're expecting a counterparty `commitment_signed`.
1085 /// To ensure we don't send a `closing_signed` too early, we track this state here, waiting
1086 /// until we see a `commitment_signed` before doing so.
1088 /// We don't bother to persist this - we anticipate this state won't last longer than a few
1089 /// milliseconds, so any accidental force-closes here should be exceedingly rare.
1090 expecting_peer_commitment_signed: bool,
1092 /// The hash of the block in which the funding transaction was included.
1093 funding_tx_confirmed_in: Option<BlockHash>,
1094 funding_tx_confirmation_height: u32,
1095 short_channel_id: Option<u64>,
1096 /// Either the height at which this channel was created or the height at which it was last
1097 /// serialized if it was serialized by versions prior to 0.0.103.
1098 /// We use this to close if funding is never broadcasted.
1099 channel_creation_height: u32,
1101 counterparty_dust_limit_satoshis: u64,
1104 pub(super) holder_dust_limit_satoshis: u64,
1106 holder_dust_limit_satoshis: u64,
1109 pub(super) counterparty_max_htlc_value_in_flight_msat: u64,
1111 counterparty_max_htlc_value_in_flight_msat: u64,
1114 pub(super) holder_max_htlc_value_in_flight_msat: u64,
1116 holder_max_htlc_value_in_flight_msat: u64,
1118 /// minimum channel reserve for self to maintain - set by them.
1119 counterparty_selected_channel_reserve_satoshis: Option<u64>,
1122 pub(super) holder_selected_channel_reserve_satoshis: u64,
1124 holder_selected_channel_reserve_satoshis: u64,
1126 counterparty_htlc_minimum_msat: u64,
1127 holder_htlc_minimum_msat: u64,
1129 pub counterparty_max_accepted_htlcs: u16,
1131 counterparty_max_accepted_htlcs: u16,
1132 holder_max_accepted_htlcs: u16,
1133 minimum_depth: Option<u32>,
1135 counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,
1137 pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
1138 funding_transaction: Option<Transaction>,
1139 is_batch_funding: Option<()>,
1141 counterparty_cur_commitment_point: Option<PublicKey>,
1142 counterparty_prev_commitment_point: Option<PublicKey>,
1143 counterparty_node_id: PublicKey,
1145 counterparty_shutdown_scriptpubkey: Option<ScriptBuf>,
1147 commitment_secrets: CounterpartyCommitmentSecrets,
1149 channel_update_status: ChannelUpdateStatus,
1150 /// Once we reach `closing_negotiation_ready`, we set this, indicating if closing_signed does
1151 /// not complete within a single timer tick (one minute), we should force-close the channel.
1152 /// This prevents us from keeping unusable channels around forever if our counterparty wishes
1154 /// Note that this field is reset to false on deserialization to give us a chance to connect to
1155 /// our peer and start the closing_signed negotiation fresh.
1156 closing_signed_in_flight: bool,
1158 /// Our counterparty's channel_announcement signatures provided in announcement_signatures.
1159 /// This can be used to rebroadcast the channel_announcement message later.
1160 announcement_sigs: Option<(Signature, Signature)>,
1162 // We save these values so we can make sure `next_local_commit_tx_fee_msat` and
1163 // `next_remote_commit_tx_fee_msat` properly predict what the next commitment transaction fee will
1164 // be, by comparing the cached values to the fee of the tranaction generated by
1165 // `build_commitment_transaction`.
1166 #[cfg(any(test, fuzzing))]
1167 next_local_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
1168 #[cfg(any(test, fuzzing))]
1169 next_remote_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
1171 /// lnd has a long-standing bug where, upon reconnection, if the channel is not yet confirmed
1172 /// they will not send a channel_reestablish until the channel locks in. Then, they will send a
1173 /// channel_ready *before* sending the channel_reestablish (which is clearly a violation of
1174 /// the BOLT specs). We copy c-lightning's workaround here and simply store the channel_ready
1175 /// message until we receive a channel_reestablish.
1177 /// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
1178 pub workaround_lnd_bug_4006: Option<msgs::ChannelReady>,
1180 /// An option set when we wish to track how many ticks have elapsed while waiting for a response
1181 /// from our counterparty after sending a message. If the peer has yet to respond after reaching
1182 /// `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`, a reconnection should be attempted to try to
1183 /// unblock the state machine.
1185 /// This behavior is mostly motivated by a lnd bug in which we don't receive a message we expect
1186 /// to in a timely manner, which may lead to channels becoming unusable and/or force-closed. An
1187 /// example of such can be found at <https://github.com/lightningnetwork/lnd/issues/7682>.
1189 /// This is currently only used when waiting for a [`msgs::ChannelReestablish`] or
1190 /// [`msgs::RevokeAndACK`] message from the counterparty.
1191 sent_message_awaiting_response: Option<usize>,
1193 #[cfg(any(test, fuzzing))]
1194 // When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
1195 // corresponding HTLC on the inbound path. If, then, the outbound path channel is
1196 // disconnected and reconnected (before we've exchange commitment_signed and revoke_and_ack
1197 // messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This
1198 // is fine, but as a sanity check in our failure to generate the second claim, we check here
1199 // that the original was a claim, and that we aren't now trying to fulfill a failed HTLC.
1200 historical_inbound_htlc_fulfills: HashSet<u64>,
1202 /// This channel's type, as negotiated during channel open
1203 channel_type: ChannelTypeFeatures,
1205 // Our counterparty can offer us SCID aliases which they will map to this channel when routing
1206 // outbound payments. These can be used in invoice route hints to avoid explicitly revealing
1207 // the channel's funding UTXO.
1209 // We also use this when sending our peer a channel_update that isn't to be broadcasted
1210 // publicly - allowing them to re-use their map of SCID -> channel for channel_update ->
1211 // associated channel mapping.
1213 // We only bother storing the most recent SCID alias at any time, though our counterparty has
1214 // to store all of them.
1215 latest_inbound_scid_alias: Option<u64>,
1217 // We always offer our counterparty a static SCID alias, which we recognize as for this channel
1218 // if we see it in HTLC forwarding instructions. We don't bother rotating the alias given we
1219 // don't currently support node id aliases and eventually privacy should be provided with
1220 // blinded paths instead of simple scid+node_id aliases.
1221 outbound_scid_alias: u64,
1223 // We track whether we already emitted a `ChannelPending` event.
1224 channel_pending_event_emitted: bool,
1226 // We track whether we already emitted a `ChannelReady` event.
1227 channel_ready_event_emitted: bool,
1229 /// The unique identifier used to re-derive the private key material for the channel through
1230 /// [`SignerProvider::derive_channel_signer`].
1231 channel_keys_id: [u8; 32],
1233 /// If we can't release a [`ChannelMonitorUpdate`] until some external action completes, we
1234 /// store it here and only release it to the `ChannelManager` once it asks for it.
1235 blocked_monitor_updates: Vec<PendingChannelMonitorUpdate>,
1238 impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
1239 /// Allowed in any state (including after shutdown)
1240 pub fn get_update_time_counter(&self) -> u32 {
1241 self.update_time_counter
1244 pub fn get_latest_monitor_update_id(&self) -> u64 {
1245 self.latest_monitor_update_id
1248 pub fn should_announce(&self) -> bool {
1249 self.config.announced_channel
1252 pub fn is_outbound(&self) -> bool {
1253 self.channel_transaction_parameters.is_outbound_from_holder
1256 /// Gets the fee we'd want to charge for adding an HTLC output to this Channel
1257 /// Allowed in any state (including after shutdown)
1258 pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 {
1259 self.config.options.forwarding_fee_base_msat
1262 /// Returns true if we've ever received a message from the remote end for this Channel
1263 pub fn have_received_message(&self) -> bool {
1264 self.channel_state > ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT)
1267 /// Returns true if this channel is fully established and not known to be closing.
1268 /// Allowed in any state (including after shutdown)
1269 pub fn is_usable(&self) -> bool {
1270 matches!(self.channel_state, ChannelState::ChannelReady(_)) &&
1271 !self.channel_state.is_local_shutdown_sent() &&
1272 !self.channel_state.is_remote_shutdown_sent() &&
1273 !self.monitor_pending_channel_ready
1276 /// shutdown state returns the state of the channel in its various stages of shutdown
1277 pub fn shutdown_state(&self) -> ChannelShutdownState {
1278 match self.channel_state {
1279 ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_) =>
1280 if self.channel_state.is_local_shutdown_sent() && !self.channel_state.is_remote_shutdown_sent() {
1281 ChannelShutdownState::ShutdownInitiated
1282 } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && !self.closing_negotiation_ready() {
1283 ChannelShutdownState::ResolvingHTLCs
1284 } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && self.closing_negotiation_ready() {
1285 ChannelShutdownState::NegotiatingClosingFee
1287 ChannelShutdownState::NotShuttingDown
1289 ChannelState::ShutdownComplete => ChannelShutdownState::ShutdownComplete,
1290 _ => ChannelShutdownState::NotShuttingDown,
1294 fn closing_negotiation_ready(&self) -> bool {
1295 let is_ready_to_close = match self.channel_state {
1296 ChannelState::AwaitingChannelReady(flags) =>
1297 flags & FundedStateFlags::ALL == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
1298 ChannelState::ChannelReady(flags) =>
1299 flags == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
1302 self.pending_inbound_htlcs.is_empty() &&
1303 self.pending_outbound_htlcs.is_empty() &&
1304 self.pending_update_fee.is_none() &&
1308 /// Returns true if this channel is currently available for use. This is a superset of
1309 /// is_usable() and considers things like the channel being temporarily disabled.
1310 /// Allowed in any state (including after shutdown)
1311 pub fn is_live(&self) -> bool {
1312 self.is_usable() && !self.channel_state.is_peer_disconnected()
1315 // Public utilities:
1317 pub fn channel_id(&self) -> ChannelId {
1321 // Return the `temporary_channel_id` used during channel establishment.
1323 // Will return `None` for channels created prior to LDK version 0.0.115.
1324 pub fn temporary_channel_id(&self) -> Option<ChannelId> {
1325 self.temporary_channel_id
1328 pub fn minimum_depth(&self) -> Option<u32> {
1332 /// Gets the "user_id" value passed into the construction of this channel. It has no special
1333 /// meaning and exists only to allow users to have a persistent identifier of a channel.
1334 pub fn get_user_id(&self) -> u128 {
1338 /// Gets the channel's type
1339 pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
1343 /// Gets the channel's `short_channel_id`.
1345 /// Will return `None` if the channel hasn't been confirmed yet.
1346 pub fn get_short_channel_id(&self) -> Option<u64> {
1347 self.short_channel_id
1350 /// Allowed in any state (including after shutdown)
1351 pub fn latest_inbound_scid_alias(&self) -> Option<u64> {
1352 self.latest_inbound_scid_alias
1355 /// Allowed in any state (including after shutdown)
1356 pub fn outbound_scid_alias(&self) -> u64 {
1357 self.outbound_scid_alias
1360 /// Returns the holder signer for this channel.
1362 pub fn get_signer(&self) -> &ChannelSignerType<SP> {
1363 return &self.holder_signer
1366 /// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
1367 /// indicating we were written by LDK prior to 0.0.106 which did not set outbound SCID aliases
1368 /// or prior to any channel actions during `Channel` initialization.
1369 pub fn set_outbound_scid_alias(&mut self, outbound_scid_alias: u64) {
1370 debug_assert_eq!(self.outbound_scid_alias, 0);
1371 self.outbound_scid_alias = outbound_scid_alias;
1374 /// Returns the funding_txo we either got from our peer, or were given by
1375 /// get_funding_created.
1376 pub fn get_funding_txo(&self) -> Option<OutPoint> {
1377 self.channel_transaction_parameters.funding_outpoint
1380 /// Returns the height in which our funding transaction was confirmed.
1381 pub fn get_funding_tx_confirmation_height(&self) -> Option<u32> {
1382 let conf_height = self.funding_tx_confirmation_height;
1383 if conf_height > 0 {
1390 /// Returns the block hash in which our funding transaction was confirmed.
1391 pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
1392 self.funding_tx_confirmed_in
1395 /// Returns the current number of confirmations on the funding transaction.
1396 pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
1397 if self.funding_tx_confirmation_height == 0 {
1398 // We either haven't seen any confirmation yet, or observed a reorg.
1402 height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1)
1405 fn get_holder_selected_contest_delay(&self) -> u16 {
1406 self.channel_transaction_parameters.holder_selected_contest_delay
1409 fn get_holder_pubkeys(&self) -> &ChannelPublicKeys {
1410 &self.channel_transaction_parameters.holder_pubkeys
1413 pub fn get_counterparty_selected_contest_delay(&self) -> Option<u16> {
1414 self.channel_transaction_parameters.counterparty_parameters
1415 .as_ref().map(|params| params.selected_contest_delay)
1418 fn get_counterparty_pubkeys(&self) -> &ChannelPublicKeys {
1419 &self.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys
1422 /// Allowed in any state (including after shutdown)
1423 pub fn get_counterparty_node_id(&self) -> PublicKey {
1424 self.counterparty_node_id
1427 /// Allowed in any state (including after shutdown)
1428 pub fn get_holder_htlc_minimum_msat(&self) -> u64 {
1429 self.holder_htlc_minimum_msat
1432 /// Allowed in any state (including after shutdown), but will return none before TheirInitSent
1433 pub fn get_holder_htlc_maximum_msat(&self) -> Option<u64> {
1434 self.get_htlc_maximum_msat(self.holder_max_htlc_value_in_flight_msat)
1437 /// Allowed in any state (including after shutdown)
1438 pub fn get_announced_htlc_max_msat(&self) -> u64 {
1440 // Upper bound by capacity. We make it a bit less than full capacity to prevent attempts
1441 // to use full capacity. This is an effort to reduce routing failures, because in many cases
1442 // channel might have been used to route very small values (either by honest users or as DoS).
1443 self.channel_value_satoshis * 1000 * 9 / 10,
1445 self.counterparty_max_htlc_value_in_flight_msat
1449 /// Allowed in any state (including after shutdown)
1450 pub fn get_counterparty_htlc_minimum_msat(&self) -> u64 {
1451 self.counterparty_htlc_minimum_msat
1454 /// Allowed in any state (including after shutdown), but will return none before TheirInitSent
1455 pub fn get_counterparty_htlc_maximum_msat(&self) -> Option<u64> {
1456 self.get_htlc_maximum_msat(self.counterparty_max_htlc_value_in_flight_msat)
1459 fn get_htlc_maximum_msat(&self, party_max_htlc_value_in_flight_msat: u64) -> Option<u64> {
1460 self.counterparty_selected_channel_reserve_satoshis.map(|counterparty_reserve| {
1461 let holder_reserve = self.holder_selected_channel_reserve_satoshis;
1463 (self.channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000,
1464 party_max_htlc_value_in_flight_msat
1469 pub fn get_value_satoshis(&self) -> u64 {
1470 self.channel_value_satoshis
1473 pub fn get_fee_proportional_millionths(&self) -> u32 {
1474 self.config.options.forwarding_fee_proportional_millionths
1477 pub fn get_cltv_expiry_delta(&self) -> u16 {
1478 cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
1481 pub fn get_max_dust_htlc_exposure_msat<F: Deref>(&self,
1482 fee_estimator: &LowerBoundedFeeEstimator<F>) -> u64
1483 where F::Target: FeeEstimator
1485 match self.config.options.max_dust_htlc_exposure {
1486 MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => {
1487 let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(
1488 ConfirmationTarget::OnChainSweep) as u64;
1489 feerate_per_kw.saturating_mul(multiplier)
1491 MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
1495 /// Returns the previous [`ChannelConfig`] applied to this channel, if any.
1496 pub fn prev_config(&self) -> Option<ChannelConfig> {
1497 self.prev_config.map(|prev_config| prev_config.0)
1500 // Checks whether we should emit a `ChannelPending` event.
1501 pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
1502 self.is_funding_broadcast() && !self.channel_pending_event_emitted
1505 // Returns whether we already emitted a `ChannelPending` event.
1506 pub(crate) fn channel_pending_event_emitted(&self) -> bool {
1507 self.channel_pending_event_emitted
1510 // Remembers that we already emitted a `ChannelPending` event.
1511 pub(crate) fn set_channel_pending_event_emitted(&mut self) {
1512 self.channel_pending_event_emitted = true;
1515 // Checks whether we should emit a `ChannelReady` event.
1516 pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool {
1517 self.is_usable() && !self.channel_ready_event_emitted
1520 // Remembers that we already emitted a `ChannelReady` event.
1521 pub(crate) fn set_channel_ready_event_emitted(&mut self) {
1522 self.channel_ready_event_emitted = true;
1525 /// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once
1526 /// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will
1527 /// no longer be considered when forwarding HTLCs.
1528 pub fn maybe_expire_prev_config(&mut self) {
1529 if self.prev_config.is_none() {
1532 let prev_config = self.prev_config.as_mut().unwrap();
1534 if prev_config.1 == EXPIRE_PREV_CONFIG_TICKS {
1535 self.prev_config = None;
1539 /// Returns the current [`ChannelConfig`] applied to the channel.
1540 pub fn config(&self) -> ChannelConfig {
1544 /// Updates the channel's config. A bool is returned indicating whether the config update
1545 /// applied resulted in a new ChannelUpdate message.
1546 pub fn update_config(&mut self, config: &ChannelConfig) -> bool {
1547 let did_channel_update =
1548 self.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths ||
1549 self.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat ||
1550 self.config.options.cltv_expiry_delta != config.cltv_expiry_delta;
1551 if did_channel_update {
1552 self.prev_config = Some((self.config.options, 0));
1553 // Update the counter, which backs the ChannelUpdate timestamp, to allow the relay
1554 // policy change to propagate throughout the network.
1555 self.update_time_counter += 1;
1557 self.config.options = *config;
1561 /// Returns true if funding_signed was sent/received and the
1562 /// funding transaction has been broadcast if necessary.
1563 pub fn is_funding_broadcast(&self) -> bool {
1564 !self.channel_state.is_pre_funded_state() &&
1565 !matches!(self.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH))
1568 /// Transaction nomenclature is somewhat confusing here as there are many different cases - a
1569 /// transaction is referred to as "a's transaction" implying that a will be able to broadcast
1570 /// the transaction. Thus, b will generally be sending a signature over such a transaction to
1571 /// a, and a can revoke the transaction by providing b the relevant per_commitment_secret. As
1572 /// such, a transaction is generally the result of b increasing the amount paid to a (or adding
1574 /// @local is used only to convert relevant internal structures which refer to remote vs local
1575 /// to decide value of outputs and direction of HTLCs.
1576 /// @generated_by_local is used to determine *which* HTLCs to include - noting that the HTLC
1577 /// state may indicate that one peer has informed the other that they'd like to add an HTLC but
1578 /// have not yet committed it. Such HTLCs will only be included in transactions which are being
1579 /// generated by the peer which proposed adding the HTLCs, and thus we need to understand both
1580 /// which peer generated this transaction and "to whom" this transaction flows.
1582 fn build_commitment_transaction<L: Deref>(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> CommitmentStats
1583 where L::Target: Logger
1585 let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new();
1586 let num_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len();
1587 let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs);
1589 let broadcaster_dust_limit_satoshis = if local { self.holder_dust_limit_satoshis } else { self.counterparty_dust_limit_satoshis };
1590 let mut remote_htlc_total_msat = 0;
1591 let mut local_htlc_total_msat = 0;
1592 let mut value_to_self_msat_offset = 0;
1594 let mut feerate_per_kw = self.feerate_per_kw;
1595 if let Some((feerate, update_state)) = self.pending_update_fee {
1596 if match update_state {
1597 // Note that these match the inclusion criteria when scanning
1598 // pending_inbound_htlcs below.
1599 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); !generated_by_local },
1600 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); !generated_by_local },
1601 FeeUpdateState::Outbound => { assert!(self.is_outbound()); generated_by_local },
1603 feerate_per_kw = feerate;
1607 log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...",
1608 commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number),
1609 get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()),
1611 if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
1613 macro_rules! get_htlc_in_commitment {
1614 ($htlc: expr, $offered: expr) => {
1615 HTLCOutputInCommitment {
1617 amount_msat: $htlc.amount_msat,
1618 cltv_expiry: $htlc.cltv_expiry,
1619 payment_hash: $htlc.payment_hash,
1620 transaction_output_index: None
1625 macro_rules! add_htlc_output {
1626 ($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => {
1627 if $outbound == local { // "offered HTLC output"
1628 let htlc_in_tx = get_htlc_in_commitment!($htlc, true);
1629 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1632 feerate_per_kw as u64 * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
1634 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1635 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1636 included_non_dust_htlcs.push((htlc_in_tx, $source));
1638 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1639 included_dust_htlcs.push((htlc_in_tx, $source));
1642 let htlc_in_tx = get_htlc_in_commitment!($htlc, false);
1643 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1646 feerate_per_kw as u64 * htlc_success_tx_weight(self.get_channel_type()) / 1000
1648 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1649 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1650 included_non_dust_htlcs.push((htlc_in_tx, $source));
1652 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1653 included_dust_htlcs.push((htlc_in_tx, $source));
1659 let mut inbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
1661 for ref htlc in self.pending_inbound_htlcs.iter() {
1662 let (include, state_name) = match htlc.state {
1663 InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"),
1664 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => (!generated_by_local, "AwaitingRemoteRevokeToAnnounce"),
1665 InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => (true, "AwaitingAnnouncedRemoteRevoke"),
1666 InboundHTLCState::Committed => (true, "Committed"),
1667 InboundHTLCState::LocalRemoved(_) => (!generated_by_local, "LocalRemoved"),
1671 add_htlc_output!(htlc, false, None, state_name);
1672 remote_htlc_total_msat += htlc.amount_msat;
1674 log_trace!(logger, " ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1676 &InboundHTLCState::LocalRemoved(ref reason) => {
1677 if generated_by_local {
1678 if let &InboundHTLCRemovalReason::Fulfill(preimage) = reason {
1679 inbound_htlc_preimages.push(preimage);
1680 value_to_self_msat_offset += htlc.amount_msat as i64;
1690 let mut outbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
1692 for ref htlc in self.pending_outbound_htlcs.iter() {
1693 let (include, state_name) = match htlc.state {
1694 OutboundHTLCState::LocalAnnounced(_) => (generated_by_local, "LocalAnnounced"),
1695 OutboundHTLCState::Committed => (true, "Committed"),
1696 OutboundHTLCState::RemoteRemoved(_) => (generated_by_local, "RemoteRemoved"),
1697 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) => (generated_by_local, "AwaitingRemoteRevokeToRemove"),
1698 OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) => (false, "AwaitingRemovedRemoteRevoke"),
1701 let preimage_opt = match htlc.state {
1702 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(p)) => p,
1703 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(p)) => p,
1704 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(p)) => p,
1708 if let Some(preimage) = preimage_opt {
1709 outbound_htlc_preimages.push(preimage);
1713 add_htlc_output!(htlc, true, Some(&htlc.source), state_name);
1714 local_htlc_total_msat += htlc.amount_msat;
1716 log_trace!(logger, " ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1718 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_))|OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => {
1719 value_to_self_msat_offset -= htlc.amount_msat as i64;
1721 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(_)) => {
1722 if !generated_by_local {
1723 value_to_self_msat_offset -= htlc.amount_msat as i64;
1731 let value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
1732 assert!(value_to_self_msat >= 0);
1733 // Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie
1734 // AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
1735 // "violate" their reserve value by couting those against it. Thus, we have to convert
1736 // everything to i64 before subtracting as otherwise we can overflow.
1737 let value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
1738 assert!(value_to_remote_msat >= 0);
1740 #[cfg(debug_assertions)]
1742 // Make sure that the to_self/to_remote is always either past the appropriate
1743 // channel_reserve *or* it is making progress towards it.
1744 let mut broadcaster_max_commitment_tx_output = if generated_by_local {
1745 self.holder_max_commitment_tx_output.lock().unwrap()
1747 self.counterparty_max_commitment_tx_output.lock().unwrap()
1749 debug_assert!(broadcaster_max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= self.counterparty_selected_channel_reserve_satoshis.unwrap() as i64);
1750 broadcaster_max_commitment_tx_output.0 = cmp::max(broadcaster_max_commitment_tx_output.0, value_to_self_msat as u64);
1751 debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= self.holder_selected_channel_reserve_satoshis as i64);
1752 broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64);
1755 let total_fee_sat = commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), &self.channel_transaction_parameters.channel_type_features);
1756 let anchors_val = if self.channel_transaction_parameters.channel_type_features.supports_anchors_zero_fee_htlc_tx() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64;
1757 let (value_to_self, value_to_remote) = if self.is_outbound() {
1758 (value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000)
1760 (value_to_self_msat / 1000, value_to_remote_msat / 1000 - anchors_val - total_fee_sat as i64)
1763 let mut value_to_a = if local { value_to_self } else { value_to_remote };
1764 let mut value_to_b = if local { value_to_remote } else { value_to_self };
1765 let (funding_pubkey_a, funding_pubkey_b) = if local {
1766 (self.get_holder_pubkeys().funding_pubkey, self.get_counterparty_pubkeys().funding_pubkey)
1768 (self.get_counterparty_pubkeys().funding_pubkey, self.get_holder_pubkeys().funding_pubkey)
1771 if value_to_a >= (broadcaster_dust_limit_satoshis as i64) {
1772 log_trace!(logger, " ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a);
1777 if value_to_b >= (broadcaster_dust_limit_satoshis as i64) {
1778 log_trace!(logger, " ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b);
1783 let num_nondust_htlcs = included_non_dust_htlcs.len();
1785 let channel_parameters =
1786 if local { self.channel_transaction_parameters.as_holder_broadcastable() }
1787 else { self.channel_transaction_parameters.as_counterparty_broadcastable() };
1788 let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number,
1795 &mut included_non_dust_htlcs,
1798 let mut htlcs_included = included_non_dust_htlcs;
1799 // The unwrap is safe, because all non-dust HTLCs have been assigned an output index
1800 htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
1801 htlcs_included.append(&mut included_dust_htlcs);
1809 local_balance_msat: value_to_self_msat as u64,
1810 remote_balance_msat: value_to_remote_msat as u64,
1811 inbound_htlc_preimages,
1812 outbound_htlc_preimages,
1817 /// Creates a set of keys for build_commitment_transaction to generate a transaction which our
1818 /// counterparty will sign (ie DO NOT send signatures over a transaction created by this to
1819 /// our counterparty!)
1820 /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
1821 /// TODO Some magic rust shit to compile-time check this?
1822 fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
1823 let per_commitment_point = self.holder_signer.as_ref().get_per_commitment_point(commitment_number, &self.secp_ctx);
1824 let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
1825 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1826 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1828 TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint)
1832 /// Creates a set of keys for build_commitment_transaction to generate a transaction which we
1833 /// will sign and send to our counterparty.
1834 /// If an Err is returned, it is a ChannelError::Close (for get_funding_created)
1835 fn build_remote_transaction_keys(&self) -> TxCreationKeys {
1836 //TODO: Ensure that the payment_key derived here ends up in the library users' wallet as we
1837 //may see payments to it!
1838 let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
1839 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1840 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1842 TxCreationKeys::derive_new(&self.secp_ctx, &self.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint)
1845 /// Gets the redeemscript for the funding transaction output (ie the funding transaction output
1846 /// pays to get_funding_redeemscript().to_v0_p2wsh()).
1847 /// Panics if called before accept_channel/InboundV1Channel::new
1848 pub fn get_funding_redeemscript(&self) -> ScriptBuf {
1849 make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
1852 fn counterparty_funding_pubkey(&self) -> &PublicKey {
1853 &self.get_counterparty_pubkeys().funding_pubkey
1856 pub fn get_feerate_sat_per_1000_weight(&self) -> u32 {
1860 pub fn get_dust_buffer_feerate(&self, outbound_feerate_update: Option<u32>) -> u32 {
1861 // When calculating our exposure to dust HTLCs, we assume that the channel feerate
1862 // may, at any point, increase by at least 10 sat/vB (i.e 2530 sat/kWU) or 25%,
1863 // whichever is higher. This ensures that we aren't suddenly exposed to significantly
1864 // more dust balance if the feerate increases when we have several HTLCs pending
1865 // which are near the dust limit.
1866 let mut feerate_per_kw = self.feerate_per_kw;
1867 // If there's a pending update fee, use it to ensure we aren't under-estimating
1868 // potential feerate updates coming soon.
1869 if let Some((feerate, _)) = self.pending_update_fee {
1870 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1872 if let Some(feerate) = outbound_feerate_update {
1873 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1875 cmp::max(2530, feerate_per_kw * 1250 / 1000)
1878 /// Get forwarding information for the counterparty.
1879 pub fn counterparty_forwarding_info(&self) -> Option<CounterpartyForwardingInfo> {
1880 self.counterparty_forwarding_info.clone()
1883 /// Returns a HTLCStats about inbound pending htlcs
1884 fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1886 let mut stats = HTLCStats {
1887 pending_htlcs: context.pending_inbound_htlcs.len() as u32,
1888 pending_htlcs_value_msat: 0,
1889 on_counterparty_tx_dust_exposure_msat: 0,
1890 on_holder_tx_dust_exposure_msat: 0,
1891 holding_cell_msat: 0,
1892 on_holder_tx_holding_cell_htlcs_count: 0,
1895 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1898 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1899 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1900 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1902 let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
1903 let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
1904 for ref htlc in context.pending_inbound_htlcs.iter() {
1905 stats.pending_htlcs_value_msat += htlc.amount_msat;
1906 if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
1907 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1909 if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
1910 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1916 /// Returns a HTLCStats about pending outbound htlcs, *including* pending adds in our holding cell.
1917 fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1919 let mut stats = HTLCStats {
1920 pending_htlcs: context.pending_outbound_htlcs.len() as u32,
1921 pending_htlcs_value_msat: 0,
1922 on_counterparty_tx_dust_exposure_msat: 0,
1923 on_holder_tx_dust_exposure_msat: 0,
1924 holding_cell_msat: 0,
1925 on_holder_tx_holding_cell_htlcs_count: 0,
1928 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1931 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1932 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1933 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1935 let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
1936 let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
1937 for ref htlc in context.pending_outbound_htlcs.iter() {
1938 stats.pending_htlcs_value_msat += htlc.amount_msat;
1939 if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
1940 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1942 if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
1943 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1947 for update in context.holding_cell_htlc_updates.iter() {
1948 if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
1949 stats.pending_htlcs += 1;
1950 stats.pending_htlcs_value_msat += amount_msat;
1951 stats.holding_cell_msat += amount_msat;
1952 if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
1953 stats.on_counterparty_tx_dust_exposure_msat += amount_msat;
1955 if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
1956 stats.on_holder_tx_dust_exposure_msat += amount_msat;
1958 stats.on_holder_tx_holding_cell_htlcs_count += 1;
1965 /// Get the available balances, see [`AvailableBalances`]'s fields for more info.
1966 /// Doesn't bother handling the
1967 /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
1968 /// corner case properly.
1969 pub fn get_available_balances<F: Deref>(&self, fee_estimator: &LowerBoundedFeeEstimator<F>)
1970 -> AvailableBalances
1971 where F::Target: FeeEstimator
1973 let context = &self;
1974 // Note that we have to handle overflow due to the above case.
1975 let inbound_stats = context.get_inbound_pending_htlc_stats(None);
1976 let outbound_stats = context.get_outbound_pending_htlc_stats(None);
1978 let mut balance_msat = context.value_to_self_msat;
1979 for ref htlc in context.pending_inbound_htlcs.iter() {
1980 if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
1981 balance_msat += htlc.amount_msat;
1984 balance_msat -= outbound_stats.pending_htlcs_value_msat;
1986 let outbound_capacity_msat = context.value_to_self_msat
1987 .saturating_sub(outbound_stats.pending_htlcs_value_msat)
1989 context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);
1991 let mut available_capacity_msat = outbound_capacity_msat;
1993 let anchor_outputs_value_msat = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1994 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
1998 if context.is_outbound() {
1999 // We should mind channel commit tx fee when computing how much of the available capacity
2000 // can be used in the next htlc. Mirrors the logic in send_htlc.
2002 // The fee depends on whether the amount we will be sending is above dust or not,
2003 // and the answer will in turn change the amount itself — making it a circular
2005 // This complicates the computation around dust-values, up to the one-htlc-value.
2006 let mut real_dust_limit_timeout_sat = context.holder_dust_limit_satoshis;
2007 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2008 real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000;
2011 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
2012 let mut max_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
2013 let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered);
2014 let mut min_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_dust, Some(()));
2015 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2016 max_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
2017 min_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
2020 // We will first subtract the fee as if we were above-dust. Then, if the resulting
2021 // value ends up being below dust, we have this fee available again. In that case,
2022 // match the value to right-below-dust.
2023 let mut capacity_minus_commitment_fee_msat: i64 = available_capacity_msat as i64 -
2024 max_reserved_commit_tx_fee_msat as i64 - anchor_outputs_value_msat as i64;
2025 if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 {
2026 let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
2027 debug_assert!(one_htlc_difference_msat != 0);
2028 capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64;
2029 capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat);
2030 available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64;
2032 available_capacity_msat = capacity_minus_commitment_fee_msat as u64;
2035 // If the channel is inbound (i.e. counterparty pays the fee), we need to make sure
2036 // sending a new HTLC won't reduce their balance below our reserve threshold.
2037 let mut real_dust_limit_success_sat = context.counterparty_dust_limit_satoshis;
2038 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2039 real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000;
2042 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered);
2043 let max_reserved_commit_tx_fee_msat = context.next_remote_commit_tx_fee_msat(htlc_above_dust, None);
2045 let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000;
2046 let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat)
2047 .saturating_sub(inbound_stats.pending_htlcs_value_msat);
2049 if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat + anchor_outputs_value_msat {
2050 // If another HTLC's fee would reduce the remote's balance below the reserve limit
2051 // we've selected for them, we can only send dust HTLCs.
2052 available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1);
2056 let mut next_outbound_htlc_minimum_msat = context.counterparty_htlc_minimum_msat;
2058 // If we get close to our maximum dust exposure, we end up in a situation where we can send
2059 // between zero and the remaining dust exposure limit remaining OR above the dust limit.
2060 // Because we cannot express this as a simple min/max, we prefer to tell the user they can
2061 // send above the dust limit (as the router can always overpay to meet the dust limit).
2062 let mut remaining_msat_below_dust_exposure_limit = None;
2063 let mut dust_exposure_dust_limit_msat = 0;
2064 let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(fee_estimator);
2066 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2067 (context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
2069 let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64;
2070 (context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2071 context.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2073 let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
2074 if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
2075 remaining_msat_below_dust_exposure_limit =
2076 Some(max_dust_htlc_exposure_msat.saturating_sub(on_counterparty_dust_htlc_exposure_msat));
2077 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
2080 let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
2081 if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
2082 remaining_msat_below_dust_exposure_limit = Some(cmp::min(
2083 remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
2084 max_dust_htlc_exposure_msat.saturating_sub(on_holder_dust_htlc_exposure_msat)));
2085 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
2088 if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit {
2089 if available_capacity_msat < dust_exposure_dust_limit_msat {
2090 available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat);
2092 next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat);
2096 available_capacity_msat = cmp::min(available_capacity_msat,
2097 context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat);
2099 if outbound_stats.pending_htlcs + 1 > context.counterparty_max_accepted_htlcs as u32 {
2100 available_capacity_msat = 0;
2104 inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000
2105 - context.value_to_self_msat as i64
2106 - context.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
2107 - context.holder_selected_channel_reserve_satoshis as i64 * 1000,
2109 outbound_capacity_msat,
2110 next_outbound_htlc_limit_msat: available_capacity_msat,
2111 next_outbound_htlc_minimum_msat,
2116 pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
2117 let context = &self;
2118 (context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis)
2121 /// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the
2122 /// number of pending HTLCs that are on track to be in our next commitment tx.
2124 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
2125 /// `fee_spike_buffer_htlc` is `Some`.
2127 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
2128 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
2130 /// Dust HTLCs are excluded.
2131 fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
2132 let context = &self;
2133 assert!(context.is_outbound());
2135 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2138 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2139 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2141 let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
2142 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
2144 let mut addl_htlcs = 0;
2145 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
2147 HTLCInitiator::LocalOffered => {
2148 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
2152 HTLCInitiator::RemoteOffered => {
2153 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
2159 let mut included_htlcs = 0;
2160 for ref htlc in context.pending_inbound_htlcs.iter() {
2161 if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
2164 // We include LocalRemoved HTLCs here because we may still need to broadcast a commitment
2165 // transaction including this HTLC if it times out before they RAA.
2166 included_htlcs += 1;
2169 for ref htlc in context.pending_outbound_htlcs.iter() {
2170 if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
2174 OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1,
2175 OutboundHTLCState::Committed => included_htlcs += 1,
2176 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
2177 // We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment
2178 // transaction won't be generated until they send us their next RAA, which will mean
2179 // dropping any HTLCs in this state.
2184 for htlc in context.holding_cell_htlc_updates.iter() {
2186 &HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => {
2187 if amount_msat / 1000 < real_dust_limit_timeout_sat {
2192 _ => {}, // Don't include claims/fails that are awaiting ack, because once we get the
2193 // ack we're guaranteed to never include them in commitment txs anymore.
2197 let num_htlcs = included_htlcs + addl_htlcs;
2198 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
2199 #[cfg(any(test, fuzzing))]
2202 if fee_spike_buffer_htlc.is_some() {
2203 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
2205 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len()
2206 + context.holding_cell_htlc_updates.len();
2207 let commitment_tx_info = CommitmentTxInfoCached {
2209 total_pending_htlcs,
2210 next_holder_htlc_id: match htlc.origin {
2211 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
2212 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
2214 next_counterparty_htlc_id: match htlc.origin {
2215 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
2216 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
2218 feerate: context.feerate_per_kw,
2220 *context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
2225 /// Get the commitment tx fee for the remote's next commitment transaction based on the number of
2226 /// pending HTLCs that are on track to be in their next commitment tx
2228 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
2229 /// `fee_spike_buffer_htlc` is `Some`.
2231 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
2232 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
2234 /// Dust HTLCs are excluded.
2235 fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
2236 let context = &self;
2237 assert!(!context.is_outbound());
2239 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2242 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2243 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2245 let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
2246 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
2248 let mut addl_htlcs = 0;
2249 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
2251 HTLCInitiator::LocalOffered => {
2252 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
2256 HTLCInitiator::RemoteOffered => {
2257 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
2263 // When calculating the set of HTLCs which will be included in their next commitment_signed, all
2264 // non-dust inbound HTLCs are included (as all states imply it will be included) and only
2265 // committed outbound HTLCs, see below.
2266 let mut included_htlcs = 0;
2267 for ref htlc in context.pending_inbound_htlcs.iter() {
2268 if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat {
2271 included_htlcs += 1;
2274 for ref htlc in context.pending_outbound_htlcs.iter() {
2275 if htlc.amount_msat / 1000 <= real_dust_limit_success_sat {
2278 // We only include outbound HTLCs if it will not be included in their next commitment_signed,
2279 // i.e. if they've responded to us with an RAA after announcement.
2281 OutboundHTLCState::Committed => included_htlcs += 1,
2282 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
2283 OutboundHTLCState::LocalAnnounced { .. } => included_htlcs += 1,
2288 let num_htlcs = included_htlcs + addl_htlcs;
2289 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
2290 #[cfg(any(test, fuzzing))]
2293 if fee_spike_buffer_htlc.is_some() {
2294 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
2296 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len();
2297 let commitment_tx_info = CommitmentTxInfoCached {
2299 total_pending_htlcs,
2300 next_holder_htlc_id: match htlc.origin {
2301 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
2302 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
2304 next_counterparty_htlc_id: match htlc.origin {
2305 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
2306 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
2308 feerate: context.feerate_per_kw,
2310 *context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
2315 fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O>
2316 where F: Fn() -> Option<O> {
2317 match self.channel_state {
2318 ChannelState::FundingNegotiated => f(),
2319 ChannelState::AwaitingChannelReady(flags) => if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) {
2328 /// Returns the transaction if there is a pending funding transaction that is yet to be
2330 pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
2331 self.if_unbroadcasted_funding(|| self.funding_transaction.clone())
2334 /// Returns the transaction ID if there is a pending funding transaction that is yet to be
2336 pub fn unbroadcasted_funding_txid(&self) -> Option<Txid> {
2337 self.if_unbroadcasted_funding(||
2338 self.channel_transaction_parameters.funding_outpoint.map(|txo| txo.txid)
2342 /// Returns whether the channel is funded in a batch.
2343 pub fn is_batch_funding(&self) -> bool {
2344 self.is_batch_funding.is_some()
2347 /// Returns the transaction ID if there is a pending batch funding transaction that is yet to be
2349 pub fn unbroadcasted_batch_funding_txid(&self) -> Option<Txid> {
2350 self.unbroadcasted_funding_txid().filter(|_| self.is_batch_funding())
2353 /// Gets the latest commitment transaction and any dependent transactions for relay (forcing
2354 /// shutdown of this channel - no more calls into this Channel may be made afterwards except
2355 /// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
2356 /// Also returns the list of payment_hashes for channels which we can safely fail backwards
2357 /// immediately (others we will have to allow to time out).
2358 pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult {
2359 // Note that we MUST only generate a monitor update that indicates force-closure - we're
2360 // called during initialization prior to the chain_monitor in the encompassing ChannelManager
2361 // being fully configured in some cases. Thus, its likely any monitor events we generate will
2362 // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
2363 assert!(!matches!(self.channel_state, ChannelState::ShutdownComplete));
2365 // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
2366 // return them to fail the payment.
2367 let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
2368 let counterparty_node_id = self.get_counterparty_node_id();
2369 for htlc_update in self.holding_cell_htlc_updates.drain(..) {
2371 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
2372 dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
2377 let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
2378 // If we haven't yet exchanged funding signatures (ie channel_state < AwaitingChannelReady),
2379 // returning a channel monitor update here would imply a channel monitor update before
2380 // we even registered the channel monitor to begin with, which is invalid.
2381 // Thus, if we aren't actually at a point where we could conceivably broadcast the
2382 // funding transaction, don't return a funding txo (which prevents providing the
2383 // monitor update to the user, even if we return one).
2384 // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
2385 let generate_monitor_update = match self.channel_state {
2386 ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)|ChannelState::ShutdownComplete => true,
2389 if generate_monitor_update {
2390 self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
2391 Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
2392 update_id: self.latest_monitor_update_id,
2393 counterparty_node_id: Some(self.counterparty_node_id),
2394 updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
2398 let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();
2400 self.channel_state = ChannelState::ShutdownComplete;
2401 self.update_time_counter += 1;
2404 dropped_outbound_htlcs,
2405 unbroadcasted_batch_funding_txid,
2406 channel_id: self.channel_id,
2407 counterparty_node_id: self.counterparty_node_id,
2411 /// Only allowed after [`Self::channel_transaction_parameters`] is set.
2412 fn get_funding_signed_msg<L: Deref>(&mut self, logger: &L) -> (CommitmentTransaction, Option<msgs::FundingSigned>) where L::Target: Logger {
2413 let counterparty_keys = self.build_remote_transaction_keys();
2414 let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number + 1, &counterparty_keys, false, false, logger).tx;
2416 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
2417 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
2418 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
2419 &self.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
2421 match &self.holder_signer {
2422 // TODO (arik): move match into calling method for Taproot
2423 ChannelSignerType::Ecdsa(ecdsa) => {
2424 let funding_signed = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.secp_ctx)
2425 .map(|(signature, _)| msgs::FundingSigned {
2426 channel_id: self.channel_id(),
2429 partial_signature_with_nonce: None,
2433 if funding_signed.is_none() {
2434 #[cfg(not(async_signing))] {
2435 panic!("Failed to get signature for funding_signed");
2437 #[cfg(async_signing)] {
2438 log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
2439 self.signer_pending_funding = true;
2441 } else if self.signer_pending_funding {
2442 log_trace!(logger, "Counterparty commitment signature available for funding_signed message; clearing signer_pending_funding");
2443 self.signer_pending_funding = false;
2446 // We sign "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
2447 (counterparty_initial_commitment_tx, funding_signed)
2449 // TODO (taproot|arik)
2456 // Internal utility functions for channels
2458 /// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
2459 /// `channel_value_satoshis` in msat, set through
2460 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
2462 /// The effective percentage is lower bounded by 1% and upper bounded by 100%.
2464 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
2465 fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
2466 let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
2468 } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
2471 config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
2473 channel_value_satoshis * 10 * configured_percent
2476 /// Returns a minimum channel reserve value the remote needs to maintain,
2477 /// required by us according to the configured or default
2478 /// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
2480 /// Guaranteed to return a value no larger than channel_value_satoshis
2482 /// This is used both for outbound and inbound channels and has lower bound
2483 /// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
2484 pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
2485 let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
2486 cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
2489 /// This is for legacy reasons, present for forward-compatibility.
2490 /// LDK versions older than 0.0.104 don't know how read/handle values other than default
2491 /// from storage. Hence, we use this function to not persist default values of
2492 /// `holder_selected_channel_reserve_satoshis` for channels into storage.
2493 pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
2494 let (q, _) = channel_value_satoshis.overflowing_div(100);
2495 cmp::min(channel_value_satoshis, cmp::max(q, 1000))
2498 // Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
2499 // Note that num_htlcs should not include dust HTLCs.
2501 fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2502 feerate_per_kw as u64 * (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
2505 // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs.
2506 // Note that num_htlcs should not include dust HTLCs.
2507 pub(crate) fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2508 // Note that we need to divide before multiplying to round properly,
2509 // since the lowest denomination of bitcoin on-chain is the satoshi.
2510 (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
2513 // Holder designates channel data owned for the benefit of the user client.
2514 // Counterparty designates channel data owned by the another channel participant entity.
2515 pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
2516 pub context: ChannelContext<SP>,
2519 #[cfg(any(test, fuzzing))]
2520 struct CommitmentTxInfoCached {
2522 total_pending_htlcs: usize,
2523 next_holder_htlc_id: u64,
2524 next_counterparty_htlc_id: u64,
2528 /// Contents of a wire message that fails an HTLC backwards. Useful for [`Channel::fail_htlc`] to
2529 /// fail with either [`msgs::UpdateFailMalformedHTLC`] or [`msgs::UpdateFailHTLC`] as needed.
2530 trait FailHTLCContents {
2531 type Message: FailHTLCMessageName;
2532 fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message;
2533 fn to_inbound_htlc_state(self) -> InboundHTLCState;
2534 fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK;
2536 impl FailHTLCContents for msgs::OnionErrorPacket {
2537 type Message = msgs::UpdateFailHTLC;
2538 fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
2539 msgs::UpdateFailHTLC { htlc_id, channel_id, reason: self }
2541 fn to_inbound_htlc_state(self) -> InboundHTLCState {
2542 InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(self))
2544 fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
2545 HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet: self }
2548 impl FailHTLCContents for (u16, [u8; 32]) {
2549 type Message = msgs::UpdateFailMalformedHTLC; // (failure_code, sha256_of_onion)
2550 fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
2551 msgs::UpdateFailMalformedHTLC {
2554 failure_code: self.0,
2555 sha256_of_onion: self.1
2558 fn to_inbound_htlc_state(self) -> InboundHTLCState {
2559 InboundHTLCState::LocalRemoved(
2560 InboundHTLCRemovalReason::FailMalformed((self.1, self.0))
2563 fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
2564 HTLCUpdateAwaitingACK::FailMalformedHTLC {
2566 failure_code: self.0,
2567 sha256_of_onion: self.1
2572 trait FailHTLCMessageName {
2573 fn name() -> &'static str;
2575 impl FailHTLCMessageName for msgs::UpdateFailHTLC {
2576 fn name() -> &'static str {
2580 impl FailHTLCMessageName for msgs::UpdateFailMalformedHTLC {
2581 fn name() -> &'static str {
2582 "update_fail_malformed_htlc"
2586 impl<SP: Deref> Channel<SP> where
2587 SP::Target: SignerProvider,
2588 <SP::Target as SignerProvider>::EcdsaSigner: WriteableEcdsaChannelSigner
2590 fn check_remote_fee<F: Deref, L: Deref>(
2591 channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator<F>,
2592 feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L
2593 ) -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
2595 let lower_limit_conf_target = if channel_type.supports_anchors_zero_fee_htlc_tx() {
2596 ConfirmationTarget::MinAllowedAnchorChannelRemoteFee
2598 ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee
2600 let lower_limit = fee_estimator.bounded_sat_per_1000_weight(lower_limit_conf_target);
2601 if feerate_per_kw < lower_limit {
2602 if let Some(cur_feerate) = cur_feerate_per_kw {
2603 if feerate_per_kw > cur_feerate {
2605 "Accepting feerate that may prevent us from closing this channel because it's higher than what we have now. Had {} s/kW, now {} s/kW.",
2606 cur_feerate, feerate_per_kw);
2610 return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit)));
2616 fn get_closing_scriptpubkey(&self) -> ScriptBuf {
2617 // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
2618 // is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
2619 // outside of those situations will fail.
2620 self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
2624 fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
2629 1 + // script length (0)
2633 )*4 + // * 4 for non-witness parts
2634 2 + // witness marker and flag
2635 1 + // witness element count
2636 4 + // 4 element lengths (2 sigs, multisig dummy, and witness script)
2637 self.context.get_funding_redeemscript().len() as u64 + // funding witness script
2638 2*(1 + 71); // two signatures + sighash type flags
2639 if let Some(spk) = a_scriptpubkey {
2640 ret += ((8+1) + // output values and script length
2641 spk.len() as u64) * 4; // scriptpubkey and witness multiplier
2643 if let Some(spk) = b_scriptpubkey {
2644 ret += ((8+1) + // output values and script length
2645 spk.len() as u64) * 4; // scriptpubkey and witness multiplier
2651 fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
2652 assert!(self.context.pending_inbound_htlcs.is_empty());
2653 assert!(self.context.pending_outbound_htlcs.is_empty());
2654 assert!(self.context.pending_update_fee.is_none());
2656 let mut total_fee_satoshis = proposed_total_fee_satoshis;
2657 let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 };
2658 let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 };
2660 if value_to_holder < 0 {
2661 assert!(self.context.is_outbound());
2662 total_fee_satoshis += (-value_to_holder) as u64;
2663 } else if value_to_counterparty < 0 {
2664 assert!(!self.context.is_outbound());
2665 total_fee_satoshis += (-value_to_counterparty) as u64;
2668 if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis {
2669 value_to_counterparty = 0;
2672 if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis {
2673 value_to_holder = 0;
2676 assert!(self.context.shutdown_scriptpubkey.is_some());
2677 let holder_shutdown_script = self.get_closing_scriptpubkey();
2678 let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap();
2679 let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();
2681 let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
2682 (closing_transaction, total_fee_satoshis)
2685 fn funding_outpoint(&self) -> OutPoint {
2686 self.context.channel_transaction_parameters.funding_outpoint.unwrap()
2689 /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
2692 /// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
2693 /// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
2695 /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is
2697 pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
2698 (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
2699 where L::Target: Logger {
2700 // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
2701 // (see equivalent if condition there).
2702 assert!(self.context.channel_state.should_force_holding_cell());
2703 let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
2704 let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
2705 self.context.latest_monitor_update_id = mon_update_id;
2706 if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
2707 assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
2711 fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
2712 // Either ChannelReady got set (which means it won't be unset) or there is no way any
2713 // caller thought we could have something claimed (cause we wouldn't have accepted in an
2714 // incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
2716 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
2717 panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
2720 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
2721 // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
2722 // these, but for now we just have to treat them as normal.
2724 let mut pending_idx = core::usize::MAX;
2725 let mut htlc_value_msat = 0;
2726 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
2727 if htlc.htlc_id == htlc_id_arg {
2728 debug_assert_eq!(htlc.payment_hash, PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).to_byte_array()));
2729 log_debug!(logger, "Claiming inbound HTLC id {} with payment hash {} with preimage {}",
2730 htlc.htlc_id, htlc.payment_hash, payment_preimage_arg);
2732 InboundHTLCState::Committed => {},
2733 InboundHTLCState::LocalRemoved(ref reason) => {
2734 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
2736 log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, &self.context.channel_id());
2737 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
2739 return UpdateFulfillFetch::DuplicateClaim {};
2742 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2743 // Don't return in release mode here so that we can update channel_monitor
2747 htlc_value_msat = htlc.amount_msat;
2751 if pending_idx == core::usize::MAX {
2752 #[cfg(any(test, fuzzing))]
2753 // If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
2754 // this is simply a duplicate claim, not previously failed and we lost funds.
2755 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2756 return UpdateFulfillFetch::DuplicateClaim {};
2759 // Now update local state:
2761 // We have to put the payment_preimage in the channel_monitor right away here to ensure we
2762 // can claim it even if the channel hits the chain before we see their next commitment.
2763 self.context.latest_monitor_update_id += 1;
2764 let monitor_update = ChannelMonitorUpdate {
2765 update_id: self.context.latest_monitor_update_id,
2766 counterparty_node_id: Some(self.context.counterparty_node_id),
2767 updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
2768 payment_preimage: payment_preimage_arg.clone(),
2772 if self.context.channel_state.should_force_holding_cell() {
2773 // Note that this condition is the same as the assertion in
2774 // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
2775 // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
2776 // do not not get into this branch.
2777 for pending_update in self.context.holding_cell_htlc_updates.iter() {
2778 match pending_update {
2779 &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2780 if htlc_id_arg == htlc_id {
2781 // Make sure we don't leave latest_monitor_update_id incremented here:
2782 self.context.latest_monitor_update_id -= 1;
2783 #[cfg(any(test, fuzzing))]
2784 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2785 return UpdateFulfillFetch::DuplicateClaim {};
2788 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
2789 &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
2791 if htlc_id_arg == htlc_id {
2792 log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id());
2793 // TODO: We may actually be able to switch to a fulfill here, though its
2794 // rare enough it may not be worth the complexity burden.
2795 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
2796 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2802 log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state.to_u32());
2803 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
2804 payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
2806 #[cfg(any(test, fuzzing))]
2807 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
2808 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2810 #[cfg(any(test, fuzzing))]
2811 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
2814 let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
2815 if let InboundHTLCState::Committed = htlc.state {
2817 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2818 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2820 log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, &self.context.channel_id);
2821 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
2824 UpdateFulfillFetch::NewClaim {
2827 msg: Some(msgs::UpdateFulfillHTLC {
2828 channel_id: self.context.channel_id(),
2829 htlc_id: htlc_id_arg,
2830 payment_preimage: payment_preimage_arg,
2835 pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
2836 let release_cs_monitor = self.context.blocked_monitor_updates.is_empty();
2837 match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
2838 UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
2839 // Even if we aren't supposed to let new monitor updates with commitment state
2840 // updates run, we still need to push the preimage ChannelMonitorUpdateStep no
2841 // matter what. Sadly, to push a new monitor update which flies before others
2842 // already queued, we have to insert it into the pending queue and update the
2843 // update_ids of all the following monitors.
2844 if release_cs_monitor && msg.is_some() {
2845 let mut additional_update = self.build_commitment_no_status_check(logger);
2846 // build_commitment_no_status_check may bump latest_monitor_id but we want them
2847 // to be strictly increasing by one, so decrement it here.
2848 self.context.latest_monitor_update_id = monitor_update.update_id;
2849 monitor_update.updates.append(&mut additional_update.updates);
2851 let new_mon_id = self.context.blocked_monitor_updates.get(0)
2852 .map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
2853 monitor_update.update_id = new_mon_id;
2854 for held_update in self.context.blocked_monitor_updates.iter_mut() {
2855 held_update.update.update_id += 1;
2858 debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
2859 let update = self.build_commitment_no_status_check(logger);
2860 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
2866 self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
2867 UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, }
2869 UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
2873 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2874 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2875 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2876 /// before we fail backwards.
2878 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2879 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2880 /// [`ChannelError::Ignore`].
2881 pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
2882 -> Result<(), ChannelError> where L::Target: Logger {
2883 self.fail_htlc(htlc_id_arg, err_packet, true, logger)
2884 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
2887 /// Used for failing back with [`msgs::UpdateFailMalformedHTLC`]. For now, this is used when we
2888 /// want to fail blinded HTLCs where we are not the intro node.
2890 /// See [`Self::queue_fail_htlc`] for more info.
2891 pub fn queue_fail_malformed_htlc<L: Deref>(
2892 &mut self, htlc_id_arg: u64, failure_code: u16, sha256_of_onion: [u8; 32], logger: &L
2893 ) -> Result<(), ChannelError> where L::Target: Logger {
2894 self.fail_htlc(htlc_id_arg, (failure_code, sha256_of_onion), true, logger)
2895 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
2898 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2899 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2900 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2901 /// before we fail backwards.
2903 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2904 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2905 /// [`ChannelError::Ignore`].
2906 fn fail_htlc<L: Deref, E: FailHTLCContents + Clone>(
2907 &mut self, htlc_id_arg: u64, err_packet: E, mut force_holding_cell: bool,
2909 ) -> Result<Option<E::Message>, ChannelError> where L::Target: Logger {
2910 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
2911 panic!("Was asked to fail an HTLC when channel was not in an operational state");
2914 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
2915 // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
2916 // these, but for now we just have to treat them as normal.
2918 let mut pending_idx = core::usize::MAX;
2919 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
2920 if htlc.htlc_id == htlc_id_arg {
2922 InboundHTLCState::Committed => {},
2923 InboundHTLCState::LocalRemoved(ref reason) => {
2924 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
2926 debug_assert!(false, "Tried to fail an HTLC that was already failed");
2931 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2932 return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
2938 if pending_idx == core::usize::MAX {
2939 #[cfg(any(test, fuzzing))]
2940 // If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
2941 // is simply a duplicate fail, not previously failed and we failed-back too early.
2942 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2946 if self.context.channel_state.should_force_holding_cell() {
2947 debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
2948 force_holding_cell = true;
2951 // Now update local state:
2952 if force_holding_cell {
2953 for pending_update in self.context.holding_cell_htlc_updates.iter() {
2954 match pending_update {
2955 &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2956 if htlc_id_arg == htlc_id {
2957 #[cfg(any(test, fuzzing))]
2958 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2962 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
2963 &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
2965 if htlc_id_arg == htlc_id {
2966 debug_assert!(false, "Tried to fail an HTLC that was already failed");
2967 return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
2973 log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id());
2974 self.context.holding_cell_htlc_updates.push(err_packet.to_htlc_update_awaiting_ack(htlc_id_arg));
2978 log_trace!(logger, "Failing HTLC ID {} back with {} message in channel {}.", htlc_id_arg,
2979 E::Message::name(), &self.context.channel_id());
2981 let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
2982 htlc.state = err_packet.clone().to_inbound_htlc_state();
2985 Ok(Some(err_packet.to_message(htlc_id_arg, self.context.channel_id())))
2988 // Message handlers:
2989 /// Updates the state of the channel to indicate that all channels in the batch have received
2990 /// funding_signed and persisted their monitors.
2991 /// The funding transaction is consequently allowed to be broadcast, and the channel can be
2992 /// treated as a non-batch channel going forward.
2993 pub fn set_batch_ready(&mut self) {
2994 self.context.is_batch_funding = None;
2995 self.context.channel_state.clear_waiting_for_batch();
2998 /// Unsets the existing funding information.
3000 /// This must only be used if the channel has not yet completed funding and has not been used.
3002 /// Further, the channel must be immediately shut down after this with a call to
3003 /// [`ChannelContext::force_shutdown`].
3004 pub fn unset_funding_info(&mut self, temporary_channel_id: ChannelId) {
3005 debug_assert!(matches!(
3006 self.context.channel_state, ChannelState::AwaitingChannelReady(_)
3008 self.context.channel_transaction_parameters.funding_outpoint = None;
3009 self.context.channel_id = temporary_channel_id;
3012 /// Handles a channel_ready message from our peer. If we've already sent our channel_ready
3013 /// and the channel is now usable (and public), this may generate an announcement_signatures to
3015 pub fn channel_ready<NS: Deref, L: Deref>(
3016 &mut self, msg: &msgs::ChannelReady, node_signer: &NS, chain_hash: ChainHash,
3017 user_config: &UserConfig, best_block: &BestBlock, logger: &L
3018 ) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
3020 NS::Target: NodeSigner,
3023 if self.context.channel_state.is_peer_disconnected() {
3024 self.context.workaround_lnd_bug_4006 = Some(msg.clone());
3025 return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
3028 if let Some(scid_alias) = msg.short_channel_id_alias {
3029 if Some(scid_alias) != self.context.short_channel_id {
3030 // The scid alias provided can be used to route payments *from* our counterparty,
3031 // i.e. can be used for inbound payments and provided in invoices, but is not used
3032 // when routing outbound payments.
3033 self.context.latest_inbound_scid_alias = Some(scid_alias);
3037 // Our channel_ready shouldn't have been sent if we are waiting for other channels in the
3038 // batch, but we can receive channel_ready messages.
3039 let mut check_reconnection = false;
3040 match &self.context.channel_state {
3041 ChannelState::AwaitingChannelReady(flags) => {
3042 let flags = *flags & !FundedStateFlags::ALL;
3043 debug_assert!(!flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY) || !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
3044 if flags & !AwaitingChannelReadyFlags::WAITING_FOR_BATCH == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY {
3045 // If we reconnected before sending our `channel_ready` they may still resend theirs.
3046 check_reconnection = true;
3047 } else if (flags & !AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty() {
3048 self.context.channel_state.set_their_channel_ready();
3049 } else if flags == AwaitingChannelReadyFlags::OUR_CHANNEL_READY {
3050 self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
3051 self.context.update_time_counter += 1;
3053 // We're in `WAITING_FOR_BATCH`, so we should wait until we're ready.
3054 debug_assert!(flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
3057 // If we reconnected before sending our `channel_ready` they may still resend theirs.
3058 ChannelState::ChannelReady(_) => check_reconnection = true,
3059 _ => return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned())),
3061 if check_reconnection {
3062 // They probably disconnected/reconnected and re-sent the channel_ready, which is
3063 // required, or they're sending a fresh SCID alias.
3064 let expected_point =
3065 if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
3066 // If they haven't ever sent an updated point, the point they send should match
3068 self.context.counterparty_cur_commitment_point
3069 } else if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
3070 // If we've advanced the commitment number once, the second commitment point is
3071 // at `counterparty_prev_commitment_point`, which is not yet revoked.
3072 debug_assert!(self.context.counterparty_prev_commitment_point.is_some());
3073 self.context.counterparty_prev_commitment_point
3075 // If they have sent updated points, channel_ready is always supposed to match
3076 // their "first" point, which we re-derive here.
3077 Some(PublicKey::from_secret_key(&self.context.secp_ctx, &SecretKey::from_slice(
3078 &self.context.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
3079 ).expect("We already advanced, so previous secret keys should have been validated already")))
3081 if expected_point != Some(msg.next_per_commitment_point) {
3082 return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
3087 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
3088 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
3090 log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id());
3092 Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger))
3095 pub fn update_add_htlc<F, FE: Deref, L: Deref>(
3096 &mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
3097 create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
3098 ) -> Result<(), ChannelError>
3099 where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
3100 FE::Target: FeeEstimator, L::Target: Logger,
3102 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3103 return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
3105 // We can't accept HTLCs sent after we've sent a shutdown.
3106 if self.context.channel_state.is_local_shutdown_sent() {
3107 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
3109 // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
3110 if self.context.channel_state.is_remote_shutdown_sent() {
3111 return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
3113 if self.context.channel_state.is_peer_disconnected() {
3114 return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
3116 if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
3117 return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
3119 if msg.amount_msat == 0 {
3120 return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
3122 if msg.amount_msat < self.context.holder_htlc_minimum_msat {
3123 return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
3126 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
3127 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
3128 if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
3129 return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
3131 if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
3132 return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
3135 // Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
3136 // the reserve_satoshis we told them to always have as direct payment so that they lose
3137 // something if we punish them for broadcasting an old state).
3138 // Note that we don't really care about having a small/no to_remote output in our local
3139 // commitment transactions, as the purpose of the channel reserve is to ensure we can
3140 // punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
3141 // present in the next commitment transaction we send them (at least for fulfilled ones,
3142 // failed ones won't modify value_to_self).
3143 // Note that we will send HTLCs which another instance of rust-lightning would think
3144 // violate the reserve value if we do not do this (as we forget inbound HTLCs from the
3145 // Channel state once they will not be present in the next received commitment
3147 let mut removed_outbound_total_msat = 0;
3148 for ref htlc in self.context.pending_outbound_htlcs.iter() {
3149 if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
3150 removed_outbound_total_msat += htlc.amount_msat;
3151 } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
3152 removed_outbound_total_msat += htlc.amount_msat;
3156 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
3157 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3160 let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
3161 (dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
3162 dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
3164 let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
3165 if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
3166 let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
3167 if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
3168 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
3169 on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
3170 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
3174 let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
3175 if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
3176 let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
3177 if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
3178 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
3179 on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
3180 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
3184 let pending_value_to_self_msat =
3185 self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
3186 let pending_remote_value_msat =
3187 self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
3188 if pending_remote_value_msat < msg.amount_msat {
3189 return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
3192 // Check that the remote can afford to pay for this HTLC on-chain at the current
3193 // feerate_per_kw, while maintaining their channel reserve (as required by the spec).
3195 let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else {
3196 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3197 self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
3199 let anchor_outputs_value_msat = if !self.context.is_outbound() && self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3200 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
3204 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(anchor_outputs_value_msat) < remote_commit_tx_fee_msat {
3205 return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
3207 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(remote_commit_tx_fee_msat).saturating_sub(anchor_outputs_value_msat) < self.context.holder_selected_channel_reserve_satoshis * 1000 {
3208 return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
3212 let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3213 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
3217 if !self.context.is_outbound() {
3218 // `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
3219 // the spec because the fee spike buffer requirement doesn't exist on the receiver's
3220 // side, only on the sender's. Note that with anchor outputs we are no longer as
3221 // sensitive to fee spikes, so we need to account for them.
3222 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3223 let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
3224 if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3225 remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
3227 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
3228 // Note that if the pending_forward_status is not updated here, then it's because we're already failing
3229 // the HTLC, i.e. its status is already set to failing.
3230 log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
3231 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
3234 // Check that they won't violate our local required channel reserve by adding this HTLC.
3235 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3236 let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
3237 if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat + anchor_outputs_value_msat {
3238 return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
3241 if self.context.next_counterparty_htlc_id != msg.htlc_id {
3242 return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
3244 if msg.cltv_expiry >= 500000000 {
3245 return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
3248 if self.context.channel_state.is_local_shutdown_sent() {
3249 if let PendingHTLCStatus::Forward(_) = pending_forward_status {
3250 panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
3254 // Now update local state:
3255 self.context.next_counterparty_htlc_id += 1;
3256 self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
3257 htlc_id: msg.htlc_id,
3258 amount_msat: msg.amount_msat,
3259 payment_hash: msg.payment_hash,
3260 cltv_expiry: msg.cltv_expiry,
3261 state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
3266 /// Marks an outbound HTLC which we have received update_fail/fulfill/malformed
3268 fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
3269 assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage");
3270 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3271 if htlc.htlc_id == htlc_id {
3272 let outcome = match check_preimage {
3273 None => fail_reason.into(),
3274 Some(payment_preimage) => {
3275 let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array());
3276 if payment_hash != htlc.payment_hash {
3277 return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
3279 OutboundHTLCOutcome::Success(Some(payment_preimage))
3283 OutboundHTLCState::LocalAnnounced(_) =>
3284 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
3285 OutboundHTLCState::Committed => {
3286 htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
3288 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
3289 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
3294 Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
3297 pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
3298 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3299 return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
3301 if self.context.channel_state.is_peer_disconnected() {
3302 return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
3305 self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
3308 pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
3309 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3310 return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
3312 if self.context.channel_state.is_peer_disconnected() {
3313 return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
3316 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
3320 pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
3321 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3322 return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
3324 if self.context.channel_state.is_peer_disconnected() {
3325 return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
3328 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
3332 pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
3333 where L::Target: Logger
3335 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3336 return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
3338 if self.context.channel_state.is_peer_disconnected() {
3339 return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
3341 if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
3342 return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
3345 let funding_script = self.context.get_funding_redeemscript();
3347 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
3349 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
3350 let commitment_txid = {
3351 let trusted_tx = commitment_stats.tx.trust();
3352 let bitcoin_tx = trusted_tx.built_transaction();
3353 let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
3355 log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
3356 log_bytes!(msg.signature.serialize_compact()[..]),
3357 log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
3358 log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), &self.context.channel_id());
3359 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
3360 return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
3364 let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();
3366 // If our counterparty updated the channel fee in this commitment transaction, check that
3367 // they can actually afford the new fee now.
3368 let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee {
3369 update_state == FeeUpdateState::RemoteAnnounced
3372 debug_assert!(!self.context.is_outbound());
3373 let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
3374 if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
3375 return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
3378 #[cfg(any(test, fuzzing))]
3380 if self.context.is_outbound() {
3381 let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
3382 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
3383 if let Some(info) = projected_commit_tx_info {
3384 let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len()
3385 + self.context.holding_cell_htlc_updates.len();
3386 if info.total_pending_htlcs == total_pending_htlcs
3387 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
3388 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
3389 && info.feerate == self.context.feerate_per_kw {
3390 assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);
3396 if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
3397 return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
3400 // Up to LDK 0.0.115, HTLC information was required to be duplicated in the
3401 // `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
3402 // in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
3403 // outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for
3404 // backwards compatibility, we never use it in production. To provide test coverage, here,
3405 // we randomly decide (in test/fuzzing builds) to use the new vec sometimes.
3406 #[allow(unused_assignments, unused_mut)]
3407 let mut separate_nondust_htlc_sources = false;
3408 #[cfg(all(feature = "std", any(test, fuzzing)))] {
3409 use core::hash::{BuildHasher, Hasher};
3410 // Get a random value using the only std API to do so - the DefaultHasher
3411 let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
3412 separate_nondust_htlc_sources = rand_val % 2 == 0;
3415 let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
3416 let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
3417 for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
3418 if let Some(_) = htlc.transaction_output_index {
3419 let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
3420 self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, &self.context.channel_type,
3421 &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
3423 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &keys);
3424 let htlc_sighashtype = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
3425 let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
3426 log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
3427 log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.to_public_key().serialize()),
3428 encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), &self.context.channel_id());
3429 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key.to_public_key()) {
3430 return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
3432 if !separate_nondust_htlc_sources {
3433 htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
3436 htlcs_and_sigs.push((htlc, None, source_opt.take()));
3438 if separate_nondust_htlc_sources {
3439 if let Some(source) = source_opt.take() {
3440 nondust_htlc_sources.push(source);
3443 debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
3446 let holder_commitment_tx = HolderCommitmentTransaction::new(
3447 commitment_stats.tx,
3449 msg.htlc_signatures.clone(),
3450 &self.context.get_holder_pubkeys().funding_pubkey,
3451 self.context.counterparty_funding_pubkey()
3454 self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.outbound_htlc_preimages)
3455 .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
3457 // Update state now that we've passed all the can-fail calls...
3458 let mut need_commitment = false;
3459 if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee {
3460 if *update_state == FeeUpdateState::RemoteAnnounced {
3461 *update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
3462 need_commitment = true;
3466 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
3467 let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
3468 Some(forward_info.clone())
3470 if let Some(forward_info) = new_forward {
3471 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
3472 &htlc.payment_hash, &self.context.channel_id);
3473 htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
3474 need_commitment = true;
3477 let mut claimed_htlcs = Vec::new();
3478 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3479 if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
3480 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
3481 &htlc.payment_hash, &self.context.channel_id);
3482 // Grab the preimage, if it exists, instead of cloning
3483 let mut reason = OutboundHTLCOutcome::Success(None);
3484 mem::swap(outcome, &mut reason);
3485 if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
3486 // If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
3487 // upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
3488 // have a `Success(None)` reason. In this case we could forget some HTLC
3489 // claims, but such an upgrade is unlikely and including claimed HTLCs here
3490 // fixes a bug which the user was exposed to on 0.0.104 when they started the
3492 claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
3494 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
3495 need_commitment = true;
3499 self.context.latest_monitor_update_id += 1;
3500 let mut monitor_update = ChannelMonitorUpdate {
3501 update_id: self.context.latest_monitor_update_id,
3502 counterparty_node_id: Some(self.context.counterparty_node_id),
3503 updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
3504 commitment_tx: holder_commitment_tx,
3505 htlc_outputs: htlcs_and_sigs,
3507 nondust_htlc_sources,
3511 self.context.cur_holder_commitment_transaction_number -= 1;
3512 self.context.expecting_peer_commitment_signed = false;
3513 // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
3514 // build_commitment_no_status_check() next which will reset this to RAAFirst.
3515 self.context.resend_order = RAACommitmentOrder::CommitmentFirst;
3517 if self.context.channel_state.is_monitor_update_in_progress() {
3518 // In case we initially failed monitor updating without requiring a response, we need
3519 // to make sure the RAA gets sent first.
3520 self.context.monitor_pending_revoke_and_ack = true;
3521 if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
3522 // If we were going to send a commitment_signed after the RAA, go ahead and do all
3523 // the corresponding HTLC status updates so that
3524 // get_last_commitment_update_for_send includes the right HTLCs.
3525 self.context.monitor_pending_commitment_signed = true;
3526 let mut additional_update = self.build_commitment_no_status_check(logger);
3527 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3528 // strictly increasing by one, so decrement it here.
3529 self.context.latest_monitor_update_id = monitor_update.update_id;
3530 monitor_update.updates.append(&mut additional_update.updates);
3532 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
3533 &self.context.channel_id);
3534 return Ok(self.push_ret_blockable_mon_update(monitor_update));
3537 let need_commitment_signed = if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
3538 // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
3539 // we'll send one right away when we get the revoke_and_ack when we
3540 // free_holding_cell_htlcs().
3541 let mut additional_update = self.build_commitment_no_status_check(logger);
3542 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3543 // strictly increasing by one, so decrement it here.
3544 self.context.latest_monitor_update_id = monitor_update.update_id;
3545 monitor_update.updates.append(&mut additional_update.updates);
3549 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
3550 &self.context.channel_id(), if need_commitment_signed { " our own commitment_signed and" } else { "" });
3551 self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
3552 return Ok(self.push_ret_blockable_mon_update(monitor_update));
3555 /// Public version of the below, checking relevant preconditions first.
3556 /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
3557 /// returns `(None, Vec::new())`.
3558 pub fn maybe_free_holding_cell_htlcs<F: Deref, L: Deref>(
3559 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3560 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3561 where F::Target: FeeEstimator, L::Target: Logger
3563 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && !self.context.channel_state.should_force_holding_cell() {
3564 self.free_holding_cell_htlcs(fee_estimator, logger)
3565 } else { (None, Vec::new()) }
3568 /// Frees any pending commitment updates in the holding cell, generating the relevant messages
3569 /// for our counterparty.
3570 fn free_holding_cell_htlcs<F: Deref, L: Deref>(
3571 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3572 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3573 where F::Target: FeeEstimator, L::Target: Logger
3575 assert!(!self.context.channel_state.is_monitor_update_in_progress());
3576 if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
3577 log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
3578 if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id());
3580 let mut monitor_update = ChannelMonitorUpdate {
3581 update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
3582 counterparty_node_id: Some(self.context.counterparty_node_id),
3583 updates: Vec::new(),
3586 let mut htlc_updates = Vec::new();
3587 mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
3588 let mut update_add_count = 0;
3589 let mut update_fulfill_count = 0;
3590 let mut update_fail_count = 0;
3591 let mut htlcs_to_fail = Vec::new();
3592 for htlc_update in htlc_updates.drain(..) {
3593 // Note that this *can* fail, though it should be due to rather-rare conditions on
3594 // fee races with adding too many outputs which push our total payments just over
3595 // the limit. In case it's less rare than I anticipate, we may want to revisit
3596 // handling this case better and maybe fulfilling some of the HTLCs while attempting
3597 // to rebalance channels.
3598 match &htlc_update {
3599 &HTLCUpdateAwaitingACK::AddHTLC {
3600 amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
3601 skimmed_fee_msat, blinding_point, ..
3603 match self.send_htlc(
3604 amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(),
3605 false, skimmed_fee_msat, blinding_point, fee_estimator, logger
3607 Ok(_) => update_add_count += 1,
3610 ChannelError::Ignore(ref msg) => {
3611 log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", &payment_hash, msg, &self.context.channel_id());
3612 // If we fail to send here, then this HTLC should
3613 // be failed backwards. Failing to send here
3614 // indicates that this HTLC may keep being put back
3615 // into the holding cell without ever being
3616 // successfully forwarded/failed/fulfilled, causing
3617 // our counterparty to eventually close on us.
3618 htlcs_to_fail.push((source.clone(), *payment_hash));
3621 panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
3627 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
3628 // If an HTLC claim was previously added to the holding cell (via
3629 // `get_update_fulfill_htlc`, then generating the claim message itself must
3630 // not fail - any in between attempts to claim the HTLC will have resulted
3631 // in it hitting the holding cell again and we cannot change the state of a
3632 // holding cell HTLC from fulfill to anything else.
3633 let mut additional_monitor_update =
3634 if let UpdateFulfillFetch::NewClaim { monitor_update, .. } =
3635 self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger)
3636 { monitor_update } else { unreachable!() };
3637 update_fulfill_count += 1;
3638 monitor_update.updates.append(&mut additional_monitor_update.updates);
3640 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
3641 match self.fail_htlc(htlc_id, err_packet.clone(), false, logger) {
3642 Ok(update_fail_msg_option) => {
3643 // If an HTLC failure was previously added to the holding cell (via
3644 // `queue_fail_htlc`) then generating the fail message itself must
3645 // not fail - we should never end up in a state where we double-fail
3646 // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
3647 // for a full revocation before failing.
3648 debug_assert!(update_fail_msg_option.is_some());
3649 update_fail_count += 1;
3652 if let ChannelError::Ignore(_) = e {}
3654 panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
3659 &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
3660 match self.fail_htlc(htlc_id, (failure_code, sha256_of_onion), false, logger) {
3661 Ok(update_fail_malformed_opt) => {
3662 debug_assert!(update_fail_malformed_opt.is_some()); // See above comment
3663 update_fail_count += 1;
3666 if let ChannelError::Ignore(_) = e {}
3668 panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
3675 if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && self.context.holding_cell_update_fee.is_none() {
3676 return (None, htlcs_to_fail);
3678 let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
3679 self.send_update_fee(feerate, false, fee_estimator, logger)
3684 let mut additional_update = self.build_commitment_no_status_check(logger);
3685 // build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_id
3686 // but we want them to be strictly increasing by one, so reset it here.
3687 self.context.latest_monitor_update_id = monitor_update.update_id;
3688 monitor_update.updates.append(&mut additional_update.updates);
3690 log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
3691 &self.context.channel_id(), if update_fee.is_some() { "a fee update, " } else { "" },
3692 update_add_count, update_fulfill_count, update_fail_count);
3694 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
3695 (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
3701 /// Handles receiving a remote's revoke_and_ack. Note that we may return a new
3702 /// commitment_signed message here in case we had pending outbound HTLCs to add which were
3703 /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
3704 /// generating an appropriate error *after* the channel state has been updated based on the
3705 /// revoke_and_ack message.
3706 pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK,
3707 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L, hold_mon_update: bool,
3708 ) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
3709 where F::Target: FeeEstimator, L::Target: Logger,
3711 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3712 return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
3714 if self.context.channel_state.is_peer_disconnected() {
3715 return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
3717 if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
3718 return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
3721 let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());
3723 if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
3724 if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
3725 return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
3729 if !self.context.channel_state.is_awaiting_remote_revoke() {
3730 // Our counterparty seems to have burned their coins to us (by revoking a state when we
3731 // haven't given them a new commitment transaction to broadcast). We should probably
3732 // take advantage of this by updating our channel monitor, sending them an error, and
3733 // waiting for them to broadcast their latest (now-revoked claim). But, that would be a
3734 // lot of work, and there's some chance this is all a misunderstanding anyway.
3735 // We have to do *something*, though, since our signer may get mad at us for otherwise
3736 // jumping a remote commitment number, so best to just force-close and move on.
3737 return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
3740 #[cfg(any(test, fuzzing))]
3742 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
3743 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
3746 match &self.context.holder_signer {
3747 ChannelSignerType::Ecdsa(ecdsa) => {
3748 ecdsa.validate_counterparty_revocation(
3749 self.context.cur_counterparty_commitment_transaction_number + 1,
3751 ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
3753 // TODO (taproot|arik)
3758 self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
3759 .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
3760 self.context.latest_monitor_update_id += 1;
3761 let mut monitor_update = ChannelMonitorUpdate {
3762 update_id: self.context.latest_monitor_update_id,
3763 counterparty_node_id: Some(self.context.counterparty_node_id),
3764 updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
3765 idx: self.context.cur_counterparty_commitment_transaction_number + 1,
3766 secret: msg.per_commitment_secret,
3770 // Update state now that we've passed all the can-fail calls...
3771 // (note that we may still fail to generate the new commitment_signed message, but that's
3772 // OK, we step the channel here and *then* if the new generation fails we can fail the
3773 // channel based on that, but stepping stuff here should be safe either way.
3774 self.context.channel_state.clear_awaiting_remote_revoke();
3775 self.context.sent_message_awaiting_response = None;
3776 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
3777 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
3778 self.context.cur_counterparty_commitment_transaction_number -= 1;
3780 if self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
3781 self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
3784 log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id());
3785 let mut to_forward_infos = Vec::new();
3786 let mut revoked_htlcs = Vec::new();
3787 let mut finalized_claimed_htlcs = Vec::new();
3788 let mut update_fail_htlcs = Vec::new();
3789 let mut update_fail_malformed_htlcs = Vec::new();
3790 let mut require_commitment = false;
3791 let mut value_to_self_msat_diff: i64 = 0;
3794 // Take references explicitly so that we can hold multiple references to self.context.
3795 let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
3796 let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;
3797 let expecting_peer_commitment_signed = &mut self.context.expecting_peer_commitment_signed;
3799 // We really shouldnt have two passes here, but retain gives a non-mutable ref (Rust bug)
3800 pending_inbound_htlcs.retain(|htlc| {
3801 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
3802 log_trace!(logger, " ...removing inbound LocalRemoved {}", &htlc.payment_hash);
3803 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
3804 value_to_self_msat_diff += htlc.amount_msat as i64;
3806 *expecting_peer_commitment_signed = true;
3810 pending_outbound_htlcs.retain(|htlc| {
3811 if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
3812 log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", &htlc.payment_hash);
3813 if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
3814 revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
3816 finalized_claimed_htlcs.push(htlc.source.clone());
3817 // They fulfilled, so we sent them money
3818 value_to_self_msat_diff -= htlc.amount_msat as i64;
3823 for htlc in pending_inbound_htlcs.iter_mut() {
3824 let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
3826 } else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
3830 let mut state = InboundHTLCState::Committed;
3831 mem::swap(&mut state, &mut htlc.state);
3833 if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
3834 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
3835 htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
3836 require_commitment = true;
3837 } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
3838 match forward_info {
3839 PendingHTLCStatus::Fail(fail_msg) => {
3840 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash);
3841 require_commitment = true;
3843 HTLCFailureMsg::Relay(msg) => {
3844 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
3845 update_fail_htlcs.push(msg)
3847 HTLCFailureMsg::Malformed(msg) => {
3848 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
3849 update_fail_malformed_htlcs.push(msg)
3853 PendingHTLCStatus::Forward(forward_info) => {
3854 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash);
3855 to_forward_infos.push((forward_info, htlc.htlc_id));
3856 htlc.state = InboundHTLCState::Committed;
3862 for htlc in pending_outbound_htlcs.iter_mut() {
3863 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
3864 log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", &htlc.payment_hash);
3865 htlc.state = OutboundHTLCState::Committed;
3866 *expecting_peer_commitment_signed = true;
3868 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
3869 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
3870 // Grab the preimage, if it exists, instead of cloning
3871 let mut reason = OutboundHTLCOutcome::Success(None);
3872 mem::swap(outcome, &mut reason);
3873 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
3874 require_commitment = true;
3878 self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;
3880 if let Some((feerate, update_state)) = self.context.pending_update_fee {
3881 match update_state {
3882 FeeUpdateState::Outbound => {
3883 debug_assert!(self.context.is_outbound());
3884 log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
3885 self.context.feerate_per_kw = feerate;
3886 self.context.pending_update_fee = None;
3887 self.context.expecting_peer_commitment_signed = true;
3889 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); },
3890 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
3891 debug_assert!(!self.context.is_outbound());
3892 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
3893 require_commitment = true;
3894 self.context.feerate_per_kw = feerate;
3895 self.context.pending_update_fee = None;
3900 let release_monitor = self.context.blocked_monitor_updates.is_empty() && !hold_mon_update;
3901 let release_state_str =
3902 if hold_mon_update { "Holding" } else if release_monitor { "Releasing" } else { "Blocked" };
3903 macro_rules! return_with_htlcs_to_fail {
3904 ($htlcs_to_fail: expr) => {
3905 if !release_monitor {
3906 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
3907 update: monitor_update,
3909 return Ok(($htlcs_to_fail, None));
3911 return Ok(($htlcs_to_fail, Some(monitor_update)));
3916 if self.context.channel_state.is_monitor_update_in_progress() {
3917 // We can't actually generate a new commitment transaction (incl by freeing holding
3918 // cells) while we can't update the monitor, so we just return what we have.
3919 if require_commitment {
3920 self.context.monitor_pending_commitment_signed = true;
3921 // When the monitor updating is restored we'll call
3922 // get_last_commitment_update_for_send(), which does not update state, but we're
3923 // definitely now awaiting a remote revoke before we can step forward any more, so
3925 let mut additional_update = self.build_commitment_no_status_check(logger);
3926 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3927 // strictly increasing by one, so decrement it here.
3928 self.context.latest_monitor_update_id = monitor_update.update_id;
3929 monitor_update.updates.append(&mut additional_update.updates);
3931 self.context.monitor_pending_forwards.append(&mut to_forward_infos);
3932 self.context.monitor_pending_failures.append(&mut revoked_htlcs);
3933 self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
3934 log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", &self.context.channel_id());
3935 return_with_htlcs_to_fail!(Vec::new());
3938 match self.free_holding_cell_htlcs(fee_estimator, logger) {
3939 (Some(mut additional_update), htlcs_to_fail) => {
3940 // free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be
3941 // strictly increasing by one, so decrement it here.
3942 self.context.latest_monitor_update_id = monitor_update.update_id;
3943 monitor_update.updates.append(&mut additional_update.updates);
3945 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with holding cell HTLCs freed. {} monitor update.",
3946 &self.context.channel_id(), release_state_str);
3948 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3949 return_with_htlcs_to_fail!(htlcs_to_fail);
3951 (None, htlcs_to_fail) => {
3952 if require_commitment {
3953 let mut additional_update = self.build_commitment_no_status_check(logger);
3955 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3956 // strictly increasing by one, so decrement it here.
3957 self.context.latest_monitor_update_id = monitor_update.update_id;
3958 monitor_update.updates.append(&mut additional_update.updates);
3960 log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed. {} monitor update.",
3961 &self.context.channel_id(),
3962 update_fail_htlcs.len() + update_fail_malformed_htlcs.len(),
3965 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3966 return_with_htlcs_to_fail!(htlcs_to_fail);
3968 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary. {} monitor update.",
3969 &self.context.channel_id(), release_state_str);
3971 self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3972 return_with_htlcs_to_fail!(htlcs_to_fail);
3978 /// Queues up an outbound update fee by placing it in the holding cell. You should call
3979 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
3980 /// commitment update.
3981 pub fn queue_update_fee<F: Deref, L: Deref>(&mut self, feerate_per_kw: u32,
3982 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
3983 where F::Target: FeeEstimator, L::Target: Logger
3985 let msg_opt = self.send_update_fee(feerate_per_kw, true, fee_estimator, logger);
3986 assert!(msg_opt.is_none(), "We forced holding cell?");
3989 /// Adds a pending update to this channel. See the doc for send_htlc for
3990 /// further details on the optionness of the return value.
3991 /// If our balance is too low to cover the cost of the next commitment transaction at the
3992 /// new feerate, the update is cancelled.
3994 /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
3995 /// [`Channel`] if `force_holding_cell` is false.
3996 fn send_update_fee<F: Deref, L: Deref>(
3997 &mut self, feerate_per_kw: u32, mut force_holding_cell: bool,
3998 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3999 ) -> Option<msgs::UpdateFee>
4000 where F::Target: FeeEstimator, L::Target: Logger
4002 if !self.context.is_outbound() {
4003 panic!("Cannot send fee from inbound channel");
4005 if !self.context.is_usable() {
4006 panic!("Cannot update fee until channel is fully established and we haven't started shutting down");
4008 if !self.context.is_live() {
4009 panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
4012 // Before proposing a feerate update, check that we can actually afford the new fee.
4013 let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
4014 let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
4015 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
4016 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
4017 let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000;
4018 let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
4019 if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
4020 //TODO: auto-close after a number of failures?
4021 log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
4025 // Note, we evaluate pending htlc "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
4026 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
4027 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
4028 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
4029 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
4030 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
4033 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
4034 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
4038 if self.context.channel_state.is_awaiting_remote_revoke() || self.context.channel_state.is_monitor_update_in_progress() {
4039 force_holding_cell = true;
4042 if force_holding_cell {
4043 self.context.holding_cell_update_fee = Some(feerate_per_kw);
4047 debug_assert!(self.context.pending_update_fee.is_none());
4048 self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));
4050 Some(msgs::UpdateFee {
4051 channel_id: self.context.channel_id,
4056 /// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
4057 /// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be
4059 /// No further message handling calls may be made until a channel_reestablish dance has
4061 /// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately.
4062 pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger {
4063 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
4064 if self.context.channel_state.is_pre_funded_state() {
4068 if self.context.channel_state.is_peer_disconnected() {
4069 // While the below code should be idempotent, it's simpler to just return early, as
4070 // redundant disconnect events can fire, though they should be rare.
4074 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
4075 self.context.announcement_sigs_state = AnnouncementSigsState::NotSent;
4078 // Upon reconnect we have to start the closing_signed dance over, but shutdown messages
4079 // will be retransmitted.
4080 self.context.last_sent_closing_fee = None;
4081 self.context.pending_counterparty_closing_signed = None;
4082 self.context.closing_fee_limits = None;
4084 let mut inbound_drop_count = 0;
4085 self.context.pending_inbound_htlcs.retain(|htlc| {
4087 InboundHTLCState::RemoteAnnounced(_) => {
4088 // They sent us an update_add_htlc but we never got the commitment_signed.
4089 // We'll tell them what commitment_signed we're expecting next and they'll drop
4090 // this HTLC accordingly
4091 inbound_drop_count += 1;
4094 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
4095 // We received a commitment_signed updating this HTLC and (at least hopefully)
4096 // sent a revoke_and_ack (which we can re-transmit) and have heard nothing
4097 // in response to it yet, so don't touch it.
4100 InboundHTLCState::Committed => true,
4101 InboundHTLCState::LocalRemoved(_) => {
4102 // We (hopefully) sent a commitment_signed updating this HTLC (which we can
4103 // re-transmit if needed) and they may have even sent a revoke_and_ack back
4104 // (that we missed). Keep this around for now and if they tell us they missed
4105 // the commitment_signed we can re-transmit the update then.
4110 self.context.next_counterparty_htlc_id -= inbound_drop_count;
4112 if let Some((_, update_state)) = self.context.pending_update_fee {
4113 if update_state == FeeUpdateState::RemoteAnnounced {
4114 debug_assert!(!self.context.is_outbound());
4115 self.context.pending_update_fee = None;
4119 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
4120 if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
4121 // They sent us an update to remove this but haven't yet sent the corresponding
4122 // commitment_signed, we need to move it back to Committed and they can re-send
4123 // the update upon reconnection.
4124 htlc.state = OutboundHTLCState::Committed;
4128 self.context.sent_message_awaiting_response = None;
4130 self.context.channel_state.set_peer_disconnected();
4131 log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id());
4135 /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
4136 /// This must be called before we return the [`ChannelMonitorUpdate`] back to the
4137 /// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
4138 /// update completes (potentially immediately).
4139 /// The messages which were generated with the monitor update must *not* have been sent to the
4140 /// remote end, and must instead have been dropped. They will be regenerated when
4141 /// [`Self::monitor_updating_restored`] is called.
4143 /// [`ChannelManager`]: super::channelmanager::ChannelManager
4144 /// [`chain::Watch`]: crate::chain::Watch
4145 /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
4146 fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
4147 resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
4148 mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
4149 mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
4151 self.context.monitor_pending_revoke_and_ack |= resend_raa;
4152 self.context.monitor_pending_commitment_signed |= resend_commitment;
4153 self.context.monitor_pending_channel_ready |= resend_channel_ready;
4154 self.context.monitor_pending_forwards.append(&mut pending_forwards);
4155 self.context.monitor_pending_failures.append(&mut pending_fails);
4156 self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
4157 self.context.channel_state.set_monitor_update_in_progress();
4160 /// Indicates that the latest ChannelMonitor update has been committed by the client
4161 /// successfully and we should restore normal operation. Returns messages which should be sent
4162 /// to the remote side.
4163 pub fn monitor_updating_restored<L: Deref, NS: Deref>(
4164 &mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash,
4165 user_config: &UserConfig, best_block_height: u32
4166 ) -> MonitorRestoreUpdates
4169 NS::Target: NodeSigner
4171 assert!(self.context.channel_state.is_monitor_update_in_progress());
4172 self.context.channel_state.clear_monitor_update_in_progress();
4174 // If we're past (or at) the AwaitingChannelReady stage on an outbound channel, try to
4175 // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
4176 // first received the funding_signed.
4177 let mut funding_broadcastable =
4178 if self.context.is_outbound() &&
4179 matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH)) ||
4180 matches!(self.context.channel_state, ChannelState::ChannelReady(_))
4182 self.context.funding_transaction.take()
4184 // That said, if the funding transaction is already confirmed (ie we're active with a
4185 // minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
4186 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && self.context.minimum_depth != Some(0) {
4187 funding_broadcastable = None;
4190 // We will never broadcast the funding transaction when we're in MonitorUpdateInProgress
4191 // (and we assume the user never directly broadcasts the funding transaction and waits for
4192 // us to do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
4193 // * an inbound channel that failed to persist the monitor on funding_created and we got
4194 // the funding transaction confirmed before the monitor was persisted, or
4195 // * a 0-conf channel and intended to send the channel_ready before any broadcast at all.
4196 let channel_ready = if self.context.monitor_pending_channel_ready {
4197 assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
4198 "Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
4199 self.context.monitor_pending_channel_ready = false;
4200 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4201 Some(msgs::ChannelReady {
4202 channel_id: self.context.channel_id(),
4203 next_per_commitment_point,
4204 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4208 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger);
4210 let mut accepted_htlcs = Vec::new();
4211 mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards);
4212 let mut failed_htlcs = Vec::new();
4213 mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
4214 let mut finalized_claimed_htlcs = Vec::new();
4215 mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);
4217 if self.context.channel_state.is_peer_disconnected() {
4218 self.context.monitor_pending_revoke_and_ack = false;
4219 self.context.monitor_pending_commitment_signed = false;
4220 return MonitorRestoreUpdates {
4221 raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
4222 accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
4226 let raa = if self.context.monitor_pending_revoke_and_ack {
4227 Some(self.get_last_revoke_and_ack())
4229 let commitment_update = if self.context.monitor_pending_commitment_signed {
4230 self.get_last_commitment_update_for_send(logger).ok()
4232 if commitment_update.is_some() {
4233 self.mark_awaiting_response();
4236 self.context.monitor_pending_revoke_and_ack = false;
4237 self.context.monitor_pending_commitment_signed = false;
4238 let order = self.context.resend_order.clone();
4239 log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
4240 &self.context.channel_id(), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
4241 if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
4242 match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
4243 MonitorRestoreUpdates {
4244 raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
4248 pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
4249 where F::Target: FeeEstimator, L::Target: Logger
4251 if self.context.is_outbound() {
4252 return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
4254 if self.context.channel_state.is_peer_disconnected() {
4255 return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
4257 Channel::<SP>::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
4259 self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
4260 self.context.update_time_counter += 1;
4261 // Check that we won't be pushed over our dust exposure limit by the feerate increase.
4262 if !self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
4263 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
4264 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
4265 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
4266 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
4267 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
4268 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
4269 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
4270 msg.feerate_per_kw, holder_tx_dust_exposure)));
4272 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
4273 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
4274 msg.feerate_per_kw, counterparty_tx_dust_exposure)));
4280 /// Indicates that the signer may have some signatures for us, so we should retry if we're
4282 #[cfg(async_signing)]
4283 pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger {
4284 let commitment_update = if self.context.signer_pending_commitment_update {
4285 self.get_last_commitment_update_for_send(logger).ok()
4287 let funding_signed = if self.context.signer_pending_funding && !self.context.is_outbound() {
4288 self.context.get_funding_signed_msg(logger).1
4290 let channel_ready = if funding_signed.is_some() {
4291 self.check_get_channel_ready(0)
4294 log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed and {} channel_ready",
4295 if commitment_update.is_some() { "a" } else { "no" },
4296 if funding_signed.is_some() { "a" } else { "no" },
4297 if channel_ready.is_some() { "a" } else { "no" });
4299 SignerResumeUpdates {
4306 fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
4307 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4308 let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
4309 msgs::RevokeAndACK {
4310 channel_id: self.context.channel_id,
4311 per_commitment_secret,
4312 next_per_commitment_point,
4314 next_local_nonce: None,
4318 /// Gets the last commitment update for immediate sending to our peer.
4319 fn get_last_commitment_update_for_send<L: Deref>(&mut self, logger: &L) -> Result<msgs::CommitmentUpdate, ()> where L::Target: Logger {
4320 let mut update_add_htlcs = Vec::new();
4321 let mut update_fulfill_htlcs = Vec::new();
4322 let mut update_fail_htlcs = Vec::new();
4323 let mut update_fail_malformed_htlcs = Vec::new();
4325 for htlc in self.context.pending_outbound_htlcs.iter() {
4326 if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
4327 update_add_htlcs.push(msgs::UpdateAddHTLC {
4328 channel_id: self.context.channel_id(),
4329 htlc_id: htlc.htlc_id,
4330 amount_msat: htlc.amount_msat,
4331 payment_hash: htlc.payment_hash,
4332 cltv_expiry: htlc.cltv_expiry,
4333 onion_routing_packet: (**onion_packet).clone(),
4334 skimmed_fee_msat: htlc.skimmed_fee_msat,
4335 blinding_point: htlc.blinding_point,
4340 for htlc in self.context.pending_inbound_htlcs.iter() {
4341 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
4343 &InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
4344 update_fail_htlcs.push(msgs::UpdateFailHTLC {
4345 channel_id: self.context.channel_id(),
4346 htlc_id: htlc.htlc_id,
4347 reason: err_packet.clone()
4350 &InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
4351 update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
4352 channel_id: self.context.channel_id(),
4353 htlc_id: htlc.htlc_id,
4354 sha256_of_onion: sha256_of_onion.clone(),
4355 failure_code: failure_code.clone(),
4358 &InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
4359 update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
4360 channel_id: self.context.channel_id(),
4361 htlc_id: htlc.htlc_id,
4362 payment_preimage: payment_preimage.clone(),
4369 let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() {
4370 Some(msgs::UpdateFee {
4371 channel_id: self.context.channel_id(),
4372 feerate_per_kw: self.context.pending_update_fee.unwrap().0,
4376 log_trace!(logger, "Regenerating latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
4377 &self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" },
4378 update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
4379 let commitment_signed = if let Ok(update) = self.send_commitment_no_state_update(logger).map(|(cu, _)| cu) {
4380 if self.context.signer_pending_commitment_update {
4381 log_trace!(logger, "Commitment update generated: clearing signer_pending_commitment_update");
4382 self.context.signer_pending_commitment_update = false;
4386 #[cfg(not(async_signing))] {
4387 panic!("Failed to get signature for new commitment state");
4389 #[cfg(async_signing)] {
4390 if !self.context.signer_pending_commitment_update {
4391 log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
4392 self.context.signer_pending_commitment_update = true;
4397 Ok(msgs::CommitmentUpdate {
4398 update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
4403 /// Gets the `Shutdown` message we should send our peer on reconnect, if any.
4404 pub fn get_outbound_shutdown(&self) -> Option<msgs::Shutdown> {
4405 if self.context.channel_state.is_local_shutdown_sent() {
4406 assert!(self.context.shutdown_scriptpubkey.is_some());
4407 Some(msgs::Shutdown {
4408 channel_id: self.context.channel_id,
4409 scriptpubkey: self.get_closing_scriptpubkey(),
4414 /// May panic if some calls other than message-handling calls (which will all Err immediately)
4415 /// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
4417 /// Some links printed in log lines are included here to check them during build (when run with
4418 /// `cargo doc --document-private-items`):
4419 /// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
4420 /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
4421 pub fn channel_reestablish<L: Deref, NS: Deref>(
4422 &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
4423 chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock
4424 ) -> Result<ReestablishResponses, ChannelError>
4427 NS::Target: NodeSigner
4429 if !self.context.channel_state.is_peer_disconnected() {
4430 // While BOLT 2 doesn't indicate explicitly we should error this channel here, it
4431 // almost certainly indicates we are going to end up out-of-sync in some way, so we
4432 // just close here instead of trying to recover.
4433 return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
4436 if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
4437 msg.next_local_commitment_number == 0 {
4438 return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned()));
4441 let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
4442 if msg.next_remote_commitment_number > 0 {
4443 let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
4444 let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
4445 .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
4446 if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
4447 return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
4449 if msg.next_remote_commitment_number > our_commitment_transaction {
4450 macro_rules! log_and_panic {
4451 ($err_msg: expr) => {
4452 log_error!(logger, $err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4453 panic!($err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4456 log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
4457 This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
4458 More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
4459 If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
4460 ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
4461 ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
4462 Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
4463 See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
4467 // Before we change the state of the channel, we check if the peer is sending a very old
4468 // commitment transaction number, if yes we send a warning message.
4469 if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
4470 return Err(ChannelError::Warn(format!(
4471 "Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)",
4472 msg.next_remote_commitment_number,
4473 our_commitment_transaction
4477 // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
4478 // remaining cases either succeed or ErrorMessage-fail).
4479 self.context.channel_state.clear_peer_disconnected();
4480 self.context.sent_message_awaiting_response = None;
4482 let shutdown_msg = self.get_outbound_shutdown();
4484 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger);
4486 if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(_)) {
4487 // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
4488 if !self.context.channel_state.is_our_channel_ready() ||
4489 self.context.channel_state.is_monitor_update_in_progress() {
4490 if msg.next_remote_commitment_number != 0 {
4491 return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
4493 // Short circuit the whole handler as there is nothing we can resend them
4494 return Ok(ReestablishResponses {
4495 channel_ready: None,
4496 raa: None, commitment_update: None,
4497 order: RAACommitmentOrder::CommitmentFirst,
4498 shutdown_msg, announcement_sigs,
4502 // We have OurChannelReady set!
4503 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4504 return Ok(ReestablishResponses {
4505 channel_ready: Some(msgs::ChannelReady {
4506 channel_id: self.context.channel_id(),
4507 next_per_commitment_point,
4508 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4510 raa: None, commitment_update: None,
4511 order: RAACommitmentOrder::CommitmentFirst,
4512 shutdown_msg, announcement_sigs,
4516 let required_revoke = if msg.next_remote_commitment_number == our_commitment_transaction {
4517 // Remote isn't waiting on any RevokeAndACK from us!
4518 // Note that if we need to repeat our ChannelReady we'll do that in the next if block.
4520 } else if msg.next_remote_commitment_number + 1 == our_commitment_transaction {
4521 if self.context.channel_state.is_monitor_update_in_progress() {
4522 self.context.monitor_pending_revoke_and_ack = true;
4525 Some(self.get_last_revoke_and_ack())
4528 debug_assert!(false, "All values should have been handled in the four cases above");
4529 return Err(ChannelError::Close(format!(
4530 "Peer attempted to reestablish channel expecting a future local commitment transaction: {} (received) vs {} (expected)",
4531 msg.next_remote_commitment_number,
4532 our_commitment_transaction
4536 // We increment cur_counterparty_commitment_transaction_number only upon receipt of
4537 // revoke_and_ack, not on sending commitment_signed, so we add one if have
4538 // AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
4539 // the corresponding revoke_and_ack back yet.
4540 let is_awaiting_remote_revoke = self.context.channel_state.is_awaiting_remote_revoke();
4541 if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
4542 self.mark_awaiting_response();
4544 let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };
4546 let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 {
4547 // We should never have to worry about MonitorUpdateInProgress resending ChannelReady
4548 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4549 Some(msgs::ChannelReady {
4550 channel_id: self.context.channel_id(),
4551 next_per_commitment_point,
4552 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4556 if msg.next_local_commitment_number == next_counterparty_commitment_number {
4557 if required_revoke.is_some() {
4558 log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id());
4560 log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id());
4563 Ok(ReestablishResponses {
4564 channel_ready, shutdown_msg, announcement_sigs,
4565 raa: required_revoke,
4566 commitment_update: None,
4567 order: self.context.resend_order.clone(),
4569 } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
4570 if required_revoke.is_some() {
4571 log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id());
4573 log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id());
4576 if self.context.channel_state.is_monitor_update_in_progress() {
4577 self.context.monitor_pending_commitment_signed = true;
4578 Ok(ReestablishResponses {
4579 channel_ready, shutdown_msg, announcement_sigs,
4580 commitment_update: None, raa: None,
4581 order: self.context.resend_order.clone(),
4584 Ok(ReestablishResponses {
4585 channel_ready, shutdown_msg, announcement_sigs,
4586 raa: required_revoke,
4587 commitment_update: self.get_last_commitment_update_for_send(logger).ok(),
4588 order: self.context.resend_order.clone(),
4591 } else if msg.next_local_commitment_number < next_counterparty_commitment_number {
4592 Err(ChannelError::Close(format!(
4593 "Peer attempted to reestablish channel with a very old remote commitment transaction: {} (received) vs {} (expected)",
4594 msg.next_local_commitment_number,
4595 next_counterparty_commitment_number,
4598 Err(ChannelError::Close(format!(
4599 "Peer attempted to reestablish channel with a future remote commitment transaction: {} (received) vs {} (expected)",
4600 msg.next_local_commitment_number,
4601 next_counterparty_commitment_number,
4606 /// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole
4607 /// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart,
4608 /// at which point they will be recalculated.
4609 fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>)
4611 where F::Target: FeeEstimator
4613 if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); }
4615 // Propose a range from our current Background feerate to our Normal feerate plus our
4616 // force_close_avoidance_max_fee_satoshis.
4617 // If we fail to come to consensus, we'll have to force-close.
4618 let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::ChannelCloseMinimum);
4619 // Use NonAnchorChannelFee because this should be an estimate for a channel close
4620 // that we don't expect to need fee bumping
4621 let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
4622 let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() };
4624 // The spec requires that (when the channel does not have anchors) we only send absolute
4625 // channel fees no greater than the absolute channel fee on the current commitment
4626 // transaction. It's unclear *which* commitment transaction this refers to, and there isn't
4627 // very good reason to apply such a limit in any case. We don't bother doing so, risking
4628 // some force-closure by old nodes, but we wanted to close the channel anyway.
4630 if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw {
4631 let min_feerate = if self.context.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) };
4632 proposed_feerate = cmp::max(proposed_feerate, min_feerate);
4633 proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate);
4636 // Note that technically we could end up with a lower minimum fee if one sides' balance is
4637 // below our dust limit, causing the output to disappear. We don't bother handling this
4638 // case, however, as this should only happen if a channel is closed before any (material)
4639 // payments have been made on it. This may cause slight fee overpayment and/or failure to
4640 // come to consensus with our counterparty on appropriate fees, however it should be a
4641 // relatively rare case. We can revisit this later, though note that in order to determine
4642 // if the funders' output is dust we have to know the absolute fee we're going to use.
4643 let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
4644 let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
4645 let proposed_max_total_fee_satoshis = if self.context.is_outbound() {
4646 // We always add force_close_avoidance_max_fee_satoshis to our normal
4647 // feerate-calculated fee, but allow the max to be overridden if we're using a
4648 // target feerate-calculated fee.
4649 cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.context.config.options.force_close_avoidance_max_fee_satoshis,
4650 proposed_max_feerate as u64 * tx_weight / 1000)
4652 self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000
4655 self.context.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis));
4656 self.context.closing_fee_limits.clone().unwrap()
4659 /// Returns true if we're ready to commence the closing_signed negotiation phase. This is true
4660 /// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At
4661 /// this point if we're the funder we should send the initial closing_signed, and in any case
4662 /// shutdown should complete within a reasonable timeframe.
4663 fn closing_negotiation_ready(&self) -> bool {
4664 self.context.closing_negotiation_ready()
4667 /// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
4668 /// an Err if no progress is being made and the channel should be force-closed instead.
4669 /// Should be called on a one-minute timer.
4670 pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
4671 if self.closing_negotiation_ready() {
4672 if self.context.closing_signed_in_flight {
4673 return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
4675 self.context.closing_signed_in_flight = true;
4681 pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
4682 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
4683 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
4684 where F::Target: FeeEstimator, L::Target: Logger
4686 // If we're waiting on a monitor persistence, that implies we're also waiting to send some
4687 // message to our counterparty (probably a `revoke_and_ack`). In such a case, we shouldn't
4688 // initiate `closing_signed` negotiation until we're clear of all pending messages. Note
4689 // that closing_negotiation_ready checks this case (as well as a few others).
4690 if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
4691 return Ok((None, None, None));
4694 if !self.context.is_outbound() {
4695 if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() {
4696 return self.closing_signed(fee_estimator, &msg);
4698 return Ok((None, None, None));
4701 // If we're waiting on a counterparty `commitment_signed` to clear some updates from our
4702 // local commitment transaction, we can't yet initiate `closing_signed` negotiation.
4703 if self.context.expecting_peer_commitment_signed {
4704 return Ok((None, None, None));
4707 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
4709 assert!(self.context.shutdown_scriptpubkey.is_some());
4710 let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false);
4711 log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)",
4712 our_min_fee, our_max_fee, total_fee_satoshis);
4714 match &self.context.holder_signer {
4715 ChannelSignerType::Ecdsa(ecdsa) => {
4717 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
4718 .map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;
4720 self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
4721 Ok((Some(msgs::ClosingSigned {
4722 channel_id: self.context.channel_id,
4723 fee_satoshis: total_fee_satoshis,
4725 fee_range: Some(msgs::ClosingSignedFeeRange {
4726 min_fee_satoshis: our_min_fee,
4727 max_fee_satoshis: our_max_fee,
4731 // TODO (taproot|arik)
4737 // Marks a channel as waiting for a response from the counterparty. If it's not received
4738 // [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] after sending our own to them, then we'll attempt
4740 fn mark_awaiting_response(&mut self) {
4741 self.context.sent_message_awaiting_response = Some(0);
4744 /// Determines whether we should disconnect the counterparty due to not receiving a response
4745 /// within our expected timeframe.
4747 /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
4748 pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool {
4749 let ticks_elapsed = if let Some(ticks_elapsed) = self.context.sent_message_awaiting_response.as_mut() {
4752 // Don't disconnect when we're not waiting on a response.
4755 *ticks_elapsed += 1;
4756 *ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
4760 &mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
4761 ) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
4763 if self.context.channel_state.is_peer_disconnected() {
4764 return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
4766 if self.context.channel_state.is_pre_funded_state() {
4767 // Spec says we should fail the connection, not the channel, but that's nonsense, there
4768 // are plenty of reasons you may want to fail a channel pre-funding, and spec says you
4769 // can do that via error message without getting a connection fail anyway...
4770 return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
4772 for htlc in self.context.pending_inbound_htlcs.iter() {
4773 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
4774 return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
4777 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
4779 if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
4780 return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_hex_string())));
4783 if self.context.counterparty_shutdown_scriptpubkey.is_some() {
4784 if Some(&msg.scriptpubkey) != self.context.counterparty_shutdown_scriptpubkey.as_ref() {
4785 return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_hex_string())));
4788 self.context.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
4791 // If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
4792 // immediately after the commitment dance, but we can send a Shutdown because we won't send
4793 // any further commitment updates after we set LocalShutdownSent.
4794 let send_shutdown = !self.context.channel_state.is_local_shutdown_sent();
4796 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
4799 assert!(send_shutdown);
4800 let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
4801 Ok(scriptpubkey) => scriptpubkey,
4802 Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
4804 if !shutdown_scriptpubkey.is_compatible(their_features) {
4805 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
4807 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
4812 // From here on out, we may not fail!
4814 self.context.channel_state.set_remote_shutdown_sent();
4815 self.context.update_time_counter += 1;
4817 let monitor_update = if update_shutdown_script {
4818 self.context.latest_monitor_update_id += 1;
4819 let monitor_update = ChannelMonitorUpdate {
4820 update_id: self.context.latest_monitor_update_id,
4821 counterparty_node_id: Some(self.context.counterparty_node_id),
4822 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
4823 scriptpubkey: self.get_closing_scriptpubkey(),
4826 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
4827 self.push_ret_blockable_mon_update(monitor_update)
4829 let shutdown = if send_shutdown {
4830 Some(msgs::Shutdown {
4831 channel_id: self.context.channel_id,
4832 scriptpubkey: self.get_closing_scriptpubkey(),
4836 // We can't send our shutdown until we've committed all of our pending HTLCs, but the
4837 // remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
4838 // cell HTLCs and return them to fail the payment.
4839 self.context.holding_cell_update_fee = None;
4840 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
4841 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
4843 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
4844 dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
4851 self.context.channel_state.set_local_shutdown_sent();
4852 self.context.update_time_counter += 1;
4854 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
4857 fn build_signed_closing_transaction(&self, closing_tx: &ClosingTransaction, counterparty_sig: &Signature, sig: &Signature) -> Transaction {
4858 let mut tx = closing_tx.trust().built_transaction().clone();
4860 tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
4862 let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize();
4863 let counterparty_funding_key = self.context.counterparty_funding_pubkey().serialize();
4864 let mut holder_sig = sig.serialize_der().to_vec();
4865 holder_sig.push(EcdsaSighashType::All as u8);
4866 let mut cp_sig = counterparty_sig.serialize_der().to_vec();
4867 cp_sig.push(EcdsaSighashType::All as u8);
4868 if funding_key[..] < counterparty_funding_key[..] {
4869 tx.input[0].witness.push(holder_sig);
4870 tx.input[0].witness.push(cp_sig);
4872 tx.input[0].witness.push(cp_sig);
4873 tx.input[0].witness.push(holder_sig);
4876 tx.input[0].witness.push(self.context.get_funding_redeemscript().into_bytes());
4880 pub fn closing_signed<F: Deref>(
4881 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned)
4882 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
4883 where F::Target: FeeEstimator
4885 if !self.context.channel_state.is_both_sides_shutdown() {
4886 return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
4888 if self.context.channel_state.is_peer_disconnected() {
4889 return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
4891 if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
4892 return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
4894 if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
4895 return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
4898 if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() {
4899 return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
4902 if self.context.channel_state.is_monitor_update_in_progress() {
4903 self.context.pending_counterparty_closing_signed = Some(msg.clone());
4904 return Ok((None, None, None));
4907 let funding_redeemscript = self.context.get_funding_redeemscript();
4908 let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
4909 if used_total_fee != msg.fee_satoshis {
4910 return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
4912 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
4914 match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
4917 // The remote end may have decided to revoke their output due to inconsistent dust
4918 // limits, so check for that case by re-checking the signature here.
4919 closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0;
4920 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
4921 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.context.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
4925 for outp in closing_tx.trust().built_transaction().output.iter() {
4926 if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
4927 return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
4931 assert!(self.context.shutdown_scriptpubkey.is_some());
4932 if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
4933 if last_fee == msg.fee_satoshis {
4934 let shutdown_result = ShutdownResult {
4935 monitor_update: None,
4936 dropped_outbound_htlcs: Vec::new(),
4937 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
4938 channel_id: self.context.channel_id,
4939 counterparty_node_id: self.context.counterparty_node_id,
4941 let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
4942 self.context.channel_state = ChannelState::ShutdownComplete;
4943 self.context.update_time_counter += 1;
4944 return Ok((None, Some(tx), Some(shutdown_result)));
4948 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
4950 macro_rules! propose_fee {
4951 ($new_fee: expr) => {
4952 let (closing_tx, used_fee) = if $new_fee == msg.fee_satoshis {
4953 (closing_tx, $new_fee)
4955 self.build_closing_transaction($new_fee, false)
4958 return match &self.context.holder_signer {
4959 ChannelSignerType::Ecdsa(ecdsa) => {
4961 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
4962 .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
4963 let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
4964 let shutdown_result = ShutdownResult {
4965 monitor_update: None,
4966 dropped_outbound_htlcs: Vec::new(),
4967 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
4968 channel_id: self.context.channel_id,
4969 counterparty_node_id: self.context.counterparty_node_id,
4971 self.context.channel_state = ChannelState::ShutdownComplete;
4972 self.context.update_time_counter += 1;
4973 let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
4974 (Some(tx), Some(shutdown_result))
4979 self.context.last_sent_closing_fee = Some((used_fee, sig.clone()));
4980 Ok((Some(msgs::ClosingSigned {
4981 channel_id: self.context.channel_id,
4982 fee_satoshis: used_fee,
4984 fee_range: Some(msgs::ClosingSignedFeeRange {
4985 min_fee_satoshis: our_min_fee,
4986 max_fee_satoshis: our_max_fee,
4988 }), signed_tx, shutdown_result))
4990 // TODO (taproot|arik)
4997 if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range {
4998 if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis {
4999 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
5001 if max_fee_satoshis < our_min_fee {
5002 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee)));
5004 if min_fee_satoshis > our_max_fee {
5005 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee)));
5008 if !self.context.is_outbound() {
5009 // They have to pay, so pick the highest fee in the overlapping range.
5010 // We should never set an upper bound aside from their full balance
5011 debug_assert_eq!(our_max_fee, self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000);
5012 propose_fee!(cmp::min(max_fee_satoshis, our_max_fee));
5014 if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee {
5015 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
5016 msg.fee_satoshis, our_min_fee, our_max_fee)));
5018 // The proposed fee is in our acceptable range, accept it and broadcast!
5019 propose_fee!(msg.fee_satoshis);
5022 // Old fee style negotiation. We don't bother to enforce whether they are complying
5023 // with the "making progress" requirements, we just comply and hope for the best.
5024 if let Some((last_fee, _)) = self.context.last_sent_closing_fee {
5025 if msg.fee_satoshis > last_fee {
5026 if msg.fee_satoshis < our_max_fee {
5027 propose_fee!(msg.fee_satoshis);
5028 } else if last_fee < our_max_fee {
5029 propose_fee!(our_max_fee);
5031 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
5034 if msg.fee_satoshis > our_min_fee {
5035 propose_fee!(msg.fee_satoshis);
5036 } else if last_fee > our_min_fee {
5037 propose_fee!(our_min_fee);
5039 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
5043 if msg.fee_satoshis < our_min_fee {
5044 propose_fee!(our_min_fee);
5045 } else if msg.fee_satoshis > our_max_fee {
5046 propose_fee!(our_max_fee);
5048 propose_fee!(msg.fee_satoshis);
5054 fn internal_htlc_satisfies_config(
5055 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig,
5056 ) -> Result<(), (&'static str, u16)> {
5057 let fee = amt_to_forward.checked_mul(config.forwarding_fee_proportional_millionths as u64)
5058 .and_then(|prop_fee| (prop_fee / 1000000).checked_add(config.forwarding_fee_base_msat as u64));
5059 if fee.is_none() || htlc.amount_msat < fee.unwrap() ||
5060 (htlc.amount_msat - fee.unwrap()) < amt_to_forward {
5062 "Prior hop has deviated from specified fees parameters or origin node has obsolete ones",
5063 0x1000 | 12, // fee_insufficient
5066 if (htlc.cltv_expiry as u64) < outgoing_cltv_value as u64 + config.cltv_expiry_delta as u64 {
5068 "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
5069 0x1000 | 13, // incorrect_cltv_expiry
5075 /// Determines whether the parameters of an incoming HTLC to be forwarded satisfy the channel's
5076 /// [`ChannelConfig`]. This first looks at the channel's current [`ChannelConfig`], and if
5077 /// unsuccessful, falls back to the previous one if one exists.
5078 pub fn htlc_satisfies_config(
5079 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32,
5080 ) -> Result<(), (&'static str, u16)> {
5081 self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.context.config())
5083 if let Some(prev_config) = self.context.prev_config() {
5084 self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &prev_config)
5091 pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
5092 self.context.cur_holder_commitment_transaction_number + 1
5095 pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
5096 self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state.is_awaiting_remote_revoke() { 1 } else { 0 }
5099 pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
5100 self.context.cur_counterparty_commitment_transaction_number + 2
5104 pub fn get_signer(&self) -> &ChannelSignerType<SP> {
5105 &self.context.holder_signer
5109 pub fn get_value_stat(&self) -> ChannelValueStat {
5111 value_to_self_msat: self.context.value_to_self_msat,
5112 channel_value_msat: self.context.channel_value_satoshis * 1000,
5113 channel_reserve_msat: self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000,
5114 pending_outbound_htlcs_amount_msat: self.context.pending_outbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
5115 pending_inbound_htlcs_amount_msat: self.context.pending_inbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
5116 holding_cell_outbound_amount_msat: {
5118 for h in self.context.holding_cell_htlc_updates.iter() {
5120 &HTLCUpdateAwaitingACK::AddHTLC{amount_msat, .. } => {
5128 counterparty_max_htlc_value_in_flight_msat: self.context.counterparty_max_htlc_value_in_flight_msat,
5129 counterparty_dust_limit_msat: self.context.counterparty_dust_limit_satoshis * 1000,
5133 /// Returns true if this channel has been marked as awaiting a monitor update to move forward.
5134 /// Allowed in any state (including after shutdown)
5135 pub fn is_awaiting_monitor_update(&self) -> bool {
5136 self.context.channel_state.is_monitor_update_in_progress()
5139 /// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
5140 pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 {
5141 if self.context.blocked_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
5142 self.context.blocked_monitor_updates[0].update.update_id - 1
5145 /// Returns the next blocked monitor update, if one exists, and a bool which indicates a
5146 /// further blocked monitor update exists after the next.
5147 pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(ChannelMonitorUpdate, bool)> {
5148 if self.context.blocked_monitor_updates.is_empty() { return None; }
5149 Some((self.context.blocked_monitor_updates.remove(0).update,
5150 !self.context.blocked_monitor_updates.is_empty()))
5153 /// Pushes a new monitor update into our monitor update queue, returning it if it should be
5154 /// immediately given to the user for persisting or `None` if it should be held as blocked.
5155 fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
5156 -> Option<ChannelMonitorUpdate> {
5157 let release_monitor = self.context.blocked_monitor_updates.is_empty();
5158 if !release_monitor {
5159 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
5168 pub fn blocked_monitor_updates_pending(&self) -> usize {
5169 self.context.blocked_monitor_updates.len()
5172 /// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
5173 /// If the channel is outbound, this implies we have not yet broadcasted the funding
5174 /// transaction. If the channel is inbound, this implies simply that the channel has not
5176 pub fn is_awaiting_initial_mon_persist(&self) -> bool {
5177 if !self.is_awaiting_monitor_update() { return false; }
5179 self.context.channel_state, ChannelState::AwaitingChannelReady(flags)
5180 if (flags & !(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY | FundedStateFlags::PEER_DISCONNECTED | FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS | AwaitingChannelReadyFlags::WAITING_FOR_BATCH)).is_empty()
5182 // If we're not a 0conf channel, we'll be waiting on a monitor update with only
5183 // AwaitingChannelReady set, though our peer could have sent their channel_ready.
5184 debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0);
5187 if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
5188 self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
5189 // If we're a 0-conf channel, we'll move beyond AwaitingChannelReady immediately even while
5190 // waiting for the initial monitor persistence. Thus, we check if our commitment
5191 // transaction numbers have both been iterated only exactly once (for the
5192 // funding_signed), and we're awaiting monitor update.
5194 // If we got here, we shouldn't have yet broadcasted the funding transaction (as the
5195 // only way to get an awaiting-monitor-update state during initial funding is if the
5196 // initial monitor persistence is still pending).
5198 // Because deciding we're awaiting initial broadcast spuriously could result in
5199 // funds-loss (as we don't have a monitor, but have the funding transaction confirmed),
5200 // we hard-assert here, even in production builds.
5201 if self.context.is_outbound() { assert!(self.context.funding_transaction.is_some()); }
5202 assert!(self.context.monitor_pending_channel_ready);
5203 assert_eq!(self.context.latest_monitor_update_id, 0);
5209 /// Returns true if our channel_ready has been sent
5210 pub fn is_our_channel_ready(&self) -> bool {
5211 matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY)) ||
5212 matches!(self.context.channel_state, ChannelState::ChannelReady(_))
5215 /// Returns true if our peer has either initiated or agreed to shut down the channel.
5216 pub fn received_shutdown(&self) -> bool {
5217 self.context.channel_state.is_remote_shutdown_sent()
5220 /// Returns true if we either initiated or agreed to shut down the channel.
5221 pub fn sent_shutdown(&self) -> bool {
5222 self.context.channel_state.is_local_shutdown_sent()
5225 /// Returns true if this channel is fully shut down. True here implies that no further actions
5226 /// may/will be taken on this channel, and thus this object should be freed. Any future changes
5227 /// will be handled appropriately by the chain monitor.
5228 pub fn is_shutdown(&self) -> bool {
5229 matches!(self.context.channel_state, ChannelState::ShutdownComplete)
5232 pub fn channel_update_status(&self) -> ChannelUpdateStatus {
5233 self.context.channel_update_status
5236 pub fn set_channel_update_status(&mut self, status: ChannelUpdateStatus) {
5237 self.context.update_time_counter += 1;
5238 self.context.channel_update_status = status;
5241 fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
5243 // * always when a new block/transactions are confirmed with the new height
5244 // * when funding is signed with a height of 0
5245 if self.context.funding_tx_confirmation_height == 0 && self.context.minimum_depth != Some(0) {
5249 let funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
5250 if funding_tx_confirmations <= 0 {
5251 self.context.funding_tx_confirmation_height = 0;
5254 if funding_tx_confirmations < self.context.minimum_depth.unwrap_or(0) as i64 {
5258 // If we're still pending the signature on a funding transaction, then we're not ready to send a
5259 // channel_ready yet.
5260 if self.context.signer_pending_funding {
5264 // Note that we don't include ChannelState::WaitingForBatch as we don't want to send
5265 // channel_ready until the entire batch is ready.
5266 let need_commitment_update = if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if (f & !FundedStateFlags::ALL).is_empty()) {
5267 self.context.channel_state.set_our_channel_ready();
5269 } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f & !FundedStateFlags::ALL == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY) {
5270 self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
5271 self.context.update_time_counter += 1;
5273 } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f & !FundedStateFlags::ALL == AwaitingChannelReadyFlags::OUR_CHANNEL_READY) {
5274 // We got a reorg but not enough to trigger a force close, just ignore.
5277 if self.context.funding_tx_confirmation_height != 0 &&
5278 self.context.channel_state < ChannelState::ChannelReady(ChannelReadyFlags::new())
5280 // We should never see a funding transaction on-chain until we've received
5281 // funding_signed (if we're an outbound channel), or seen funding_generated (if we're
5282 // an inbound channel - before that we have no known funding TXID). The fuzzer,
5283 // however, may do this and we shouldn't treat it as a bug.
5284 #[cfg(not(fuzzing))]
5285 panic!("Started confirming a channel in a state pre-AwaitingChannelReady: {}.\n\
5286 Do NOT broadcast a funding transaction manually - let LDK do it for you!",
5287 self.context.channel_state.to_u32());
5289 // We got a reorg but not enough to trigger a force close, just ignore.
5293 if need_commitment_update {
5294 if !self.context.channel_state.is_monitor_update_in_progress() {
5295 if !self.context.channel_state.is_peer_disconnected() {
5296 let next_per_commitment_point =
5297 self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
5298 return Some(msgs::ChannelReady {
5299 channel_id: self.context.channel_id,
5300 next_per_commitment_point,
5301 short_channel_id_alias: Some(self.context.outbound_scid_alias),
5305 self.context.monitor_pending_channel_ready = true;
5311 /// When a transaction is confirmed, we check whether it is or spends the funding transaction
5312 /// In the first case, we store the confirmation height and calculating the short channel id.
5313 /// In the second, we simply return an Err indicating we need to be force-closed now.
5314 pub fn transactions_confirmed<NS: Deref, L: Deref>(
5315 &mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData,
5316 chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L
5317 ) -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5319 NS::Target: NodeSigner,
5322 let mut msgs = (None, None);
5323 if let Some(funding_txo) = self.context.get_funding_txo() {
5324 for &(index_in_block, tx) in txdata.iter() {
5325 // Check if the transaction is the expected funding transaction, and if it is,
5326 // check that it pays the right amount to the right script.
5327 if self.context.funding_tx_confirmation_height == 0 {
5328 if tx.txid() == funding_txo.txid {
5329 let txo_idx = funding_txo.index as usize;
5330 if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_v0_p2wsh() ||
5331 tx.output[txo_idx].value != self.context.channel_value_satoshis {
5332 if self.context.is_outbound() {
5333 // If we generated the funding transaction and it doesn't match what it
5334 // should, the client is really broken and we should just panic and
5335 // tell them off. That said, because hash collisions happen with high
5336 // probability in fuzzing mode, if we're fuzzing we just close the
5337 // channel and move on.
5338 #[cfg(not(fuzzing))]
5339 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
5341 self.context.update_time_counter += 1;
5342 let err_reason = "funding tx had wrong script/value or output index";
5343 return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() });
5345 if self.context.is_outbound() {
5346 if !tx.is_coin_base() {
5347 for input in tx.input.iter() {
5348 if input.witness.is_empty() {
5349 // We generated a malleable funding transaction, implying we've
5350 // just exposed ourselves to funds loss to our counterparty.
5351 #[cfg(not(fuzzing))]
5352 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
5357 self.context.funding_tx_confirmation_height = height;
5358 self.context.funding_tx_confirmed_in = Some(*block_hash);
5359 self.context.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
5360 Ok(scid) => Some(scid),
5361 Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
5364 // If this is a coinbase transaction and not a 0-conf channel
5365 // we should update our min_depth to 100 to handle coinbase maturity
5366 if tx.is_coin_base() &&
5367 self.context.minimum_depth.unwrap_or(0) > 0 &&
5368 self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
5369 self.context.minimum_depth = Some(COINBASE_MATURITY);
5372 // If we allow 1-conf funding, we may need to check for channel_ready here and
5373 // send it immediately instead of waiting for a best_block_updated call (which
5374 // may have already happened for this block).
5375 if let Some(channel_ready) = self.check_get_channel_ready(height) {
5376 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
5377 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger);
5378 msgs = (Some(channel_ready), announcement_sigs);
5381 for inp in tx.input.iter() {
5382 if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
5383 log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, &self.context.channel_id());
5384 return Err(ClosureReason::CommitmentTxConfirmed);
5392 /// When a new block is connected, we check the height of the block against outbound holding
5393 /// cell HTLCs in case we need to give up on them prematurely and time them out. Everything
5394 /// else (e.g. commitment transaction broadcasts, HTLC transaction broadcasting, etc) is
5395 /// handled by the ChannelMonitor.
5397 /// If we return Err, the channel may have been closed, at which point the standard
5398 /// requirements apply - no calls may be made except those explicitly stated to be allowed
5401 /// May return some HTLCs (and their payment_hash) which have timed out and should be failed
5403 pub fn best_block_updated<NS: Deref, L: Deref>(
5404 &mut self, height: u32, highest_header_time: u32, chain_hash: ChainHash,
5405 node_signer: &NS, user_config: &UserConfig, logger: &L
5406 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5408 NS::Target: NodeSigner,
5411 self.do_best_block_updated(height, highest_header_time, Some((chain_hash, node_signer, user_config)), logger)
5414 fn do_best_block_updated<NS: Deref, L: Deref>(
5415 &mut self, height: u32, highest_header_time: u32,
5416 chain_node_signer: Option<(ChainHash, &NS, &UserConfig)>, logger: &L
5417 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5419 NS::Target: NodeSigner,
5422 let mut timed_out_htlcs = Vec::new();
5423 // This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
5424 // forward an HTLC when our counterparty should almost certainly just fail it for expiring
5426 let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
5427 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
5429 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
5430 if *cltv_expiry <= unforwarded_htlc_cltv_limit {
5431 timed_out_htlcs.push((source.clone(), payment_hash.clone()));
5439 self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);
5441 if let Some(channel_ready) = self.check_get_channel_ready(height) {
5442 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
5443 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
5445 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
5446 return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
5449 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
5450 self.context.channel_state.is_our_channel_ready() {
5451 let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
5452 if self.context.funding_tx_confirmation_height == 0 {
5453 // Note that check_get_channel_ready may reset funding_tx_confirmation_height to
5454 // zero if it has been reorged out, however in either case, our state flags
5455 // indicate we've already sent a channel_ready
5456 funding_tx_confirmations = 0;
5459 // If we've sent channel_ready (or have both sent and received channel_ready), and
5460 // the funding transaction has become unconfirmed,
5461 // close the channel and hope we can get the latest state on chain (because presumably
5462 // the funding transaction is at least still in the mempool of most nodes).
5464 // Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
5465 // 0-conf channel, but not doing so may lead to the
5466 // `ChannelManager::short_to_chan_info` map being inconsistent, so we currently have
5468 if funding_tx_confirmations == 0 && self.context.funding_tx_confirmed_in.is_some() {
5469 let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
5470 self.context.minimum_depth.unwrap(), funding_tx_confirmations);
5471 return Err(ClosureReason::ProcessingError { err: err_reason });
5473 } else if !self.context.is_outbound() && self.context.funding_tx_confirmed_in.is_none() &&
5474 height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
5475 log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id);
5476 // If funding_tx_confirmed_in is unset, the channel must not be active
5477 assert!(self.context.channel_state <= ChannelState::ChannelReady(ChannelReadyFlags::new()));
5478 assert!(!self.context.channel_state.is_our_channel_ready());
5479 return Err(ClosureReason::FundingTimedOut);
5482 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
5483 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
5485 Ok((None, timed_out_htlcs, announcement_sigs))
5488 /// Indicates the funding transaction is no longer confirmed in the main chain. This may
5489 /// force-close the channel, but may also indicate a harmless reorganization of a block or two
5490 /// before the channel has reached channel_ready and we can just wait for more blocks.
5491 pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
5492 if self.context.funding_tx_confirmation_height != 0 {
5493 // We handle the funding disconnection by calling best_block_updated with a height one
5494 // below where our funding was connected, implying a reorg back to conf_height - 1.
5495 let reorg_height = self.context.funding_tx_confirmation_height - 1;
5496 // We use the time field to bump the current time we set on channel updates if its
5497 // larger. If we don't know that time has moved forward, we can just set it to the last
5498 // time we saw and it will be ignored.
5499 let best_time = self.context.update_time_counter;
5500 match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&dyn NodeSigner, &UserConfig)>, logger) {
5501 Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
5502 assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?");
5503 assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
5504 assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?");
5510 // We never learned about the funding confirmation anyway, just ignore
5515 // Methods to get unprompted messages to send to the remote end (or where we already returned
5516 // something in the handler for the message that prompted this message):
5518 /// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
5519 /// announceable and available for use (have exchanged [`ChannelReady`] messages in both
5520 /// directions). Should be used for both broadcasted announcements and in response to an
5521 /// AnnouncementSignatures message from the remote peer.
5523 /// Will only fail if we're not in a state where channel_announcement may be sent (including
5526 /// This will only return ChannelError::Ignore upon failure.
5528 /// [`ChannelReady`]: crate::ln::msgs::ChannelReady
5529 fn get_channel_announcement<NS: Deref>(
5530 &self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
5531 ) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5532 if !self.context.config.announced_channel {
5533 return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
5535 if !self.context.is_usable() {
5536 return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
5539 let short_channel_id = self.context.get_short_channel_id()
5540 .ok_or(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel has not been confirmed yet".to_owned()))?;
5541 let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5542 .map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
5543 let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
5544 let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();
5546 let msg = msgs::UnsignedChannelAnnouncement {
5547 features: channelmanager::provided_channel_features(&user_config),
5550 node_id_1: if were_node_one { node_id } else { counterparty_node_id },
5551 node_id_2: if were_node_one { counterparty_node_id } else { node_id },
5552 bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }),
5553 bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }),
5554 excess_data: Vec::new(),
5560 fn get_announcement_sigs<NS: Deref, L: Deref>(
5561 &mut self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
5562 best_block_height: u32, logger: &L
5563 ) -> Option<msgs::AnnouncementSignatures>
5565 NS::Target: NodeSigner,
5568 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5572 if !self.context.is_usable() {
5576 if self.context.channel_state.is_peer_disconnected() {
5577 log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
5581 if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent {
5585 log_trace!(logger, "Creating an announcement_signatures message for channel {}", &self.context.channel_id());
5586 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
5589 log_trace!(logger, "{:?}", e);
5593 let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) {
5595 log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!");
5600 match &self.context.holder_signer {
5601 ChannelSignerType::Ecdsa(ecdsa) => {
5602 let our_bitcoin_sig = match ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) {
5604 log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!");
5609 let short_channel_id = match self.context.get_short_channel_id() {
5611 None => return None,
5614 self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;
5616 Some(msgs::AnnouncementSignatures {
5617 channel_id: self.context.channel_id(),
5619 node_signature: our_node_sig,
5620 bitcoin_signature: our_bitcoin_sig,
5623 // TODO (taproot|arik)
5629 /// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are
5631 fn sign_channel_announcement<NS: Deref>(
5632 &self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement
5633 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5634 if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs {
5635 let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5636 .map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?);
5637 let were_node_one = announcement.node_id_1 == our_node_key;
5639 let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement))
5640 .map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?;
5641 match &self.context.holder_signer {
5642 ChannelSignerType::Ecdsa(ecdsa) => {
5643 let our_bitcoin_sig = ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx)
5644 .map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
5645 Ok(msgs::ChannelAnnouncement {
5646 node_signature_1: if were_node_one { our_node_sig } else { their_node_sig },
5647 node_signature_2: if were_node_one { their_node_sig } else { our_node_sig },
5648 bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig },
5649 bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
5650 contents: announcement,
5653 // TODO (taproot|arik)
5658 Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
5662 /// Processes an incoming announcement_signatures message, providing a fully-signed
5663 /// channel_announcement message which we can broadcast and storing our counterparty's
5664 /// signatures for later reconstruction/rebroadcast of the channel_announcement.
5665 pub fn announcement_signatures<NS: Deref>(
5666 &mut self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32,
5667 msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
5668 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5669 let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;
5671 let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);
5673 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
5674 return Err(ChannelError::Close(format!(
5675 "Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
5676 &announcement, self.context.get_counterparty_node_id())));
5678 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
5679 return Err(ChannelError::Close(format!(
5680 "Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
5681 &announcement, self.context.counterparty_funding_pubkey())));
5684 self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature));
5685 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5686 return Err(ChannelError::Ignore(
5687 "Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned()));
5690 self.sign_channel_announcement(node_signer, announcement)
5693 /// Gets a signed channel_announcement for this channel, if we previously received an
5694 /// announcement_signatures from our counterparty.
5695 pub fn get_signed_channel_announcement<NS: Deref>(
5696 &self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32, user_config: &UserConfig
5697 ) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
5698 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5701 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
5703 Err(_) => return None,
5705 match self.sign_channel_announcement(node_signer, announcement) {
5706 Ok(res) => Some(res),
5711 /// May panic if called on a channel that wasn't immediately-previously
5712 /// self.remove_uncommitted_htlcs_and_mark_paused()'d
5713 pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
5714 assert!(self.context.channel_state.is_peer_disconnected());
5715 assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
5716 // Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
5717 // current to_remote balances. However, it no longer has any use, and thus is now simply
5718 // set to a dummy (but valid, as required by the spec) public key.
5719 // fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey"
5720 // branches, but we unwrap it below, so we arbitrarily select a dummy pubkey which is both
5721 // valid, and valid in fuzzing mode's arbitrary validity criteria:
5722 let mut pk = [2; 33]; pk[1] = 0xff;
5723 let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
5724 let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
5725 let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
5726 log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), &self.context.channel_id());
5729 log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", &self.context.channel_id());
5732 self.mark_awaiting_response();
5733 msgs::ChannelReestablish {
5734 channel_id: self.context.channel_id(),
5735 // The protocol has two different commitment number concepts - the "commitment
5736 // transaction number", which starts from 0 and counts up, and the "revocation key
5737 // index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
5738 // commitment transaction numbers by the index which will be used to reveal the
5739 // revocation key for that commitment transaction, which means we have to convert them
5740 // to protocol-level commitment numbers here...
5742 // next_local_commitment_number is the next commitment_signed number we expect to
5743 // receive (indicating if they need to resend one that we missed).
5744 next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
5745 // We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
5746 // receive, however we track it by the next commitment number for a remote transaction
5747 // (which is one further, as they always revoke previous commitment transaction, not
5748 // the one we send) so we have to decrement by 1. Note that if
5749 // cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
5750 // dropped this channel on disconnect as it hasn't yet reached AwaitingChannelReady so we can't
5752 next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
5753 your_last_per_commitment_secret: remote_last_secret,
5754 my_current_per_commitment_point: dummy_pubkey,
5755 // TODO(dual_funding): If we've sent `commtiment_signed` for an interactive transaction
5756 // construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
5757 // txid of that interactive transaction, else we MUST NOT set it.
5758 next_funding_txid: None,
5763 // Send stuff to our remote peers:
5765 /// Queues up an outbound HTLC to send by placing it in the holding cell. You should call
5766 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
5767 /// commitment update.
5769 /// `Err`s will only be [`ChannelError::Ignore`].
5770 pub fn queue_add_htlc<F: Deref, L: Deref>(
5771 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
5772 onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
5773 blinding_point: Option<PublicKey>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5774 ) -> Result<(), ChannelError>
5775 where F::Target: FeeEstimator, L::Target: Logger
5778 .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true,
5779 skimmed_fee_msat, blinding_point, fee_estimator, logger)
5780 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
5782 if let ChannelError::Ignore(_) = err { /* fine */ }
5783 else { debug_assert!(false, "Queueing cannot trigger channel failure"); }
5788 /// Adds a pending outbound HTLC to this channel, note that you probably want
5789 /// [`Self::send_htlc_and_commit`] instead cause you'll want both messages at once.
5791 /// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on
5793 /// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
5794 /// wouldn't be able to determine what they actually ACK'ed if we have two sets of updates
5796 /// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as
5797 /// we may not yet have sent the previous commitment update messages and will need to
5798 /// regenerate them.
5800 /// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods
5801 /// on this [`Channel`] if `force_holding_cell` is false.
5803 /// `Err`s will only be [`ChannelError::Ignore`].
5804 fn send_htlc<F: Deref, L: Deref>(
5805 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
5806 onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool,
5807 skimmed_fee_msat: Option<u64>, blinding_point: Option<PublicKey>,
5808 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5809 ) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
5810 where F::Target: FeeEstimator, L::Target: Logger
5812 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
5813 self.context.channel_state.is_local_shutdown_sent() ||
5814 self.context.channel_state.is_remote_shutdown_sent()
5816 return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
5818 let channel_total_msat = self.context.channel_value_satoshis * 1000;
5819 if amount_msat > channel_total_msat {
5820 return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat)));
5823 if amount_msat == 0 {
5824 return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
5827 let available_balances = self.context.get_available_balances(fee_estimator);
5828 if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
5829 return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
5830 available_balances.next_outbound_htlc_minimum_msat)));
5833 if amount_msat > available_balances.next_outbound_htlc_limit_msat {
5834 return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
5835 available_balances.next_outbound_htlc_limit_msat)));
5838 if self.context.channel_state.is_peer_disconnected() {
5839 // Note that this should never really happen, if we're !is_live() on receipt of an
5840 // incoming HTLC for relay will result in us rejecting the HTLC and we won't allow
5841 // the user to send directly into a !is_live() channel. However, if we
5842 // disconnected during the time the previous hop was doing the commitment dance we may
5843 // end up getting here after the forwarding delay. In any case, returning an
5844 // IgnoreError will get ChannelManager to do the right thing and fail backwards now.
5845 return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
5848 let need_holding_cell = self.context.channel_state.should_force_holding_cell();
5849 log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}",
5850 payment_hash, amount_msat,
5851 if force_holding_cell { "into holding cell" }
5852 else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
5853 else { "to peer" });
5855 if need_holding_cell {
5856 force_holding_cell = true;
5859 // Now update local state:
5860 if force_holding_cell {
5861 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
5866 onion_routing_packet,
5873 self.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
5874 htlc_id: self.context.next_holder_htlc_id,
5876 payment_hash: payment_hash.clone(),
5878 state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
5884 let res = msgs::UpdateAddHTLC {
5885 channel_id: self.context.channel_id,
5886 htlc_id: self.context.next_holder_htlc_id,
5890 onion_routing_packet,
5894 self.context.next_holder_htlc_id += 1;
5899 fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
5900 log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
5901 // We can upgrade the status of some HTLCs that are waiting on a commitment, even if we
5902 // fail to generate this, we still are at least at a position where upgrading their status
5904 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
5905 let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state {
5906 Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
5908 if let Some(state) = new_state {
5909 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
5913 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
5914 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
5915 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
5916 // Grab the preimage, if it exists, instead of cloning
5917 let mut reason = OutboundHTLCOutcome::Success(None);
5918 mem::swap(outcome, &mut reason);
5919 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
5922 if let Some((feerate, update_state)) = self.context.pending_update_fee {
5923 if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
5924 debug_assert!(!self.context.is_outbound());
5925 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
5926 self.context.feerate_per_kw = feerate;
5927 self.context.pending_update_fee = None;
5930 self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst;
5932 let (mut htlcs_ref, counterparty_commitment_tx) =
5933 self.build_commitment_no_state_update(logger);
5934 let counterparty_commitment_txid = counterparty_commitment_tx.trust().txid();
5935 let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
5936 htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();
5938 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent {
5939 self.context.announcement_sigs_state = AnnouncementSigsState::Committed;
5942 self.context.latest_monitor_update_id += 1;
5943 let monitor_update = ChannelMonitorUpdate {
5944 update_id: self.context.latest_monitor_update_id,
5945 counterparty_node_id: Some(self.context.counterparty_node_id),
5946 updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
5947 commitment_txid: counterparty_commitment_txid,
5948 htlc_outputs: htlcs.clone(),
5949 commitment_number: self.context.cur_counterparty_commitment_transaction_number,
5950 their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap(),
5951 feerate_per_kw: Some(counterparty_commitment_tx.feerate_per_kw()),
5952 to_broadcaster_value_sat: Some(counterparty_commitment_tx.to_broadcaster_value_sat()),
5953 to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()),
5956 self.context.channel_state.set_awaiting_remote_revoke();
5960 fn build_commitment_no_state_update<L: Deref>(&self, logger: &L)
5961 -> (Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>, CommitmentTransaction)
5962 where L::Target: Logger
5964 let counterparty_keys = self.context.build_remote_transaction_keys();
5965 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
5966 let counterparty_commitment_tx = commitment_stats.tx;
5968 #[cfg(any(test, fuzzing))]
5970 if !self.context.is_outbound() {
5971 let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
5972 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
5973 if let Some(info) = projected_commit_tx_info {
5974 let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len();
5975 if info.total_pending_htlcs == total_pending_htlcs
5976 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
5977 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
5978 && info.feerate == self.context.feerate_per_kw {
5979 let actual_fee = commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.get_channel_type());
5980 assert_eq!(actual_fee, info.fee);
5986 (commitment_stats.htlcs_included, counterparty_commitment_tx)
5989 /// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
5990 /// generation when we shouldn't change HTLC/channel state.
5991 fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
5992 // Get the fee tests from `build_commitment_no_state_update`
5993 #[cfg(any(test, fuzzing))]
5994 self.build_commitment_no_state_update(logger);
5996 let counterparty_keys = self.context.build_remote_transaction_keys();
5997 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
5998 let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
6000 match &self.context.holder_signer {
6001 ChannelSignerType::Ecdsa(ecdsa) => {
6002 let (signature, htlc_signatures);
6005 let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
6006 for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
6010 let res = ecdsa.sign_counterparty_commitment(
6011 &commitment_stats.tx,
6012 commitment_stats.inbound_htlc_preimages,
6013 commitment_stats.outbound_htlc_preimages,
6014 &self.context.secp_ctx,
6015 ).map_err(|_| ChannelError::Ignore("Failed to get signatures for new commitment_signed".to_owned()))?;
6017 htlc_signatures = res.1;
6019 log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
6020 encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
6021 &counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
6022 log_bytes!(signature.serialize_compact()[..]), &self.context.channel_id());
6024 for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
6025 log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
6026 encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, &self.context.channel_type, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
6027 encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &counterparty_keys)),
6028 log_bytes!(counterparty_keys.broadcaster_htlc_key.to_public_key().serialize()),
6029 log_bytes!(htlc_sig.serialize_compact()[..]), &self.context.channel_id());
6033 Ok((msgs::CommitmentSigned {
6034 channel_id: self.context.channel_id,
6038 partial_signature_with_nonce: None,
6039 }, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
6041 // TODO (taproot|arik)
6047 /// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
6048 /// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
6050 /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
6051 /// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
6052 pub fn send_htlc_and_commit<F: Deref, L: Deref>(
6053 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32,
6054 source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
6055 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
6056 ) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
6057 where F::Target: FeeEstimator, L::Target: Logger
6059 let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
6060 onion_routing_packet, false, skimmed_fee_msat, None, fee_estimator, logger);
6061 if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
6064 let monitor_update = self.build_commitment_no_status_check(logger);
6065 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
6066 Ok(self.push_ret_blockable_mon_update(monitor_update))
6072 /// Applies the `ChannelUpdate` and returns a boolean indicating whether a change actually
6074 pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<bool, ChannelError> {
6075 let new_forwarding_info = Some(CounterpartyForwardingInfo {
6076 fee_base_msat: msg.contents.fee_base_msat,
6077 fee_proportional_millionths: msg.contents.fee_proportional_millionths,
6078 cltv_expiry_delta: msg.contents.cltv_expiry_delta
6080 let did_change = self.context.counterparty_forwarding_info != new_forwarding_info;
6082 self.context.counterparty_forwarding_info = new_forwarding_info;
6088 /// Begins the shutdown process, getting a message for the remote peer and returning all
6089 /// holding cell HTLCs for payment failure.
6090 pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures,
6091 target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
6092 -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
6094 for htlc in self.context.pending_outbound_htlcs.iter() {
6095 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
6096 return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
6099 if self.context.channel_state.is_local_shutdown_sent() {
6100 return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
6102 else if self.context.channel_state.is_remote_shutdown_sent() {
6103 return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
6105 if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
6106 return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
6108 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
6109 if self.context.channel_state.is_peer_disconnected() || self.context.channel_state.is_monitor_update_in_progress() {
6110 return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
6113 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
6116 // use override shutdown script if provided
6117 let shutdown_scriptpubkey = match override_shutdown_script {
6118 Some(script) => script,
6120 // otherwise, use the shutdown scriptpubkey provided by the signer
6121 match signer_provider.get_shutdown_scriptpubkey() {
6122 Ok(scriptpubkey) => scriptpubkey,
6123 Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
6127 if !shutdown_scriptpubkey.is_compatible(their_features) {
6128 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
6130 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
6135 // From here on out, we may not fail!
6136 self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
6137 self.context.channel_state.set_local_shutdown_sent();
6138 self.context.update_time_counter += 1;
6140 let monitor_update = if update_shutdown_script {
6141 self.context.latest_monitor_update_id += 1;
6142 let monitor_update = ChannelMonitorUpdate {
6143 update_id: self.context.latest_monitor_update_id,
6144 counterparty_node_id: Some(self.context.counterparty_node_id),
6145 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
6146 scriptpubkey: self.get_closing_scriptpubkey(),
6149 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
6150 self.push_ret_blockable_mon_update(monitor_update)
6152 let shutdown = msgs::Shutdown {
6153 channel_id: self.context.channel_id,
6154 scriptpubkey: self.get_closing_scriptpubkey(),
6157 // Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
6158 // our shutdown until we've committed all of the pending changes.
6159 self.context.holding_cell_update_fee = None;
6160 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
6161 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
6163 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
6164 dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
6171 debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
6172 "we can't both complete shutdown and return a monitor update");
6174 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
6177 pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
6178 self.context.holding_cell_htlc_updates.iter()
6179 .flat_map(|htlc_update| {
6181 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
6182 => Some((source, payment_hash)),
6186 .chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
6190 /// A not-yet-funded outbound (from holder) channel using V1 channel establishment.
6191 pub(super) struct OutboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
6192 pub context: ChannelContext<SP>,
6193 pub unfunded_context: UnfundedChannelContext,
6196 impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
6197 pub fn new<ES: Deref, F: Deref>(
6198 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
6199 channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
6200 outbound_scid_alias: u64, temporary_channel_id: Option<ChannelId>
6201 ) -> Result<OutboundV1Channel<SP>, APIError>
6202 where ES::Target: EntropySource,
6203 F::Target: FeeEstimator
6205 let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
6206 let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
6207 let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
6208 let pubkeys = holder_signer.pubkeys().clone();
6210 if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
6211 return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
6213 if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
6214 return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
6216 let channel_value_msat = channel_value_satoshis * 1000;
6217 if push_msat > channel_value_msat {
6218 return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
6220 if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
6221 return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks", holder_selected_contest_delay)});
6223 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
6224 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6225 // Protocol level safety check in place, although it should never happen because
6226 // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
6227 return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below implemention limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
6230 let channel_type = Self::get_initial_channel_type(&config, their_features);
6231 debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
6233 let (commitment_conf_target, anchor_outputs_value_msat) = if channel_type.supports_anchors_zero_fee_htlc_tx() {
6234 (ConfirmationTarget::AnchorChannelFee, ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000)
6236 (ConfirmationTarget::NonAnchorChannelFee, 0)
6238 let commitment_feerate = fee_estimator.bounded_sat_per_1000_weight(commitment_conf_target);
6240 let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
6241 let commitment_tx_fee = commit_tx_fee_msat(commitment_feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
6242 if value_to_self_msat.saturating_sub(anchor_outputs_value_msat) < commitment_tx_fee {
6243 return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
6246 let mut secp_ctx = Secp256k1::new();
6247 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
6249 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
6250 match signer_provider.get_shutdown_scriptpubkey() {
6251 Ok(scriptpubkey) => Some(scriptpubkey),
6252 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
6256 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
6257 if !shutdown_scriptpubkey.is_compatible(&their_features) {
6258 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
6262 let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
6263 Ok(script) => script,
6264 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
6267 let temporary_channel_id = temporary_channel_id.unwrap_or_else(|| ChannelId::temporary_from_entropy_source(entropy_source));
6270 context: ChannelContext {
6273 config: LegacyChannelConfig {
6274 options: config.channel_config.clone(),
6275 announced_channel: config.channel_handshake_config.announced_channel,
6276 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
6281 inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),
6283 channel_id: temporary_channel_id,
6284 temporary_channel_id: Some(temporary_channel_id),
6285 channel_state: ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT),
6286 announcement_sigs_state: AnnouncementSigsState::NotSent,
6288 channel_value_satoshis,
6290 latest_monitor_update_id: 0,
6292 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
6293 shutdown_scriptpubkey,
6296 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6297 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6300 pending_inbound_htlcs: Vec::new(),
6301 pending_outbound_htlcs: Vec::new(),
6302 holding_cell_htlc_updates: Vec::new(),
6303 pending_update_fee: None,
6304 holding_cell_update_fee: None,
6305 next_holder_htlc_id: 0,
6306 next_counterparty_htlc_id: 0,
6307 update_time_counter: 1,
6309 resend_order: RAACommitmentOrder::CommitmentFirst,
6311 monitor_pending_channel_ready: false,
6312 monitor_pending_revoke_and_ack: false,
6313 monitor_pending_commitment_signed: false,
6314 monitor_pending_forwards: Vec::new(),
6315 monitor_pending_failures: Vec::new(),
6316 monitor_pending_finalized_fulfills: Vec::new(),
6318 signer_pending_commitment_update: false,
6319 signer_pending_funding: false,
6321 #[cfg(debug_assertions)]
6322 holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
6323 #[cfg(debug_assertions)]
6324 counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
6326 last_sent_closing_fee: None,
6327 pending_counterparty_closing_signed: None,
6328 expecting_peer_commitment_signed: false,
6329 closing_fee_limits: None,
6330 target_closing_feerate_sats_per_kw: None,
6332 funding_tx_confirmed_in: None,
6333 funding_tx_confirmation_height: 0,
6334 short_channel_id: None,
6335 channel_creation_height: current_chain_height,
6337 feerate_per_kw: commitment_feerate,
6338 counterparty_dust_limit_satoshis: 0,
6339 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
6340 counterparty_max_htlc_value_in_flight_msat: 0,
6341 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
6342 counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel
6343 holder_selected_channel_reserve_satoshis,
6344 counterparty_htlc_minimum_msat: 0,
6345 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
6346 counterparty_max_accepted_htlcs: 0,
6347 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
6348 minimum_depth: None, // Filled in in accept_channel
6350 counterparty_forwarding_info: None,
6352 channel_transaction_parameters: ChannelTransactionParameters {
6353 holder_pubkeys: pubkeys,
6354 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
6355 is_outbound_from_holder: true,
6356 counterparty_parameters: None,
6357 funding_outpoint: None,
6358 channel_type_features: channel_type.clone()
6360 funding_transaction: None,
6361 is_batch_funding: None,
6363 counterparty_cur_commitment_point: None,
6364 counterparty_prev_commitment_point: None,
6365 counterparty_node_id,
6367 counterparty_shutdown_scriptpubkey: None,
6369 commitment_secrets: CounterpartyCommitmentSecrets::new(),
6371 channel_update_status: ChannelUpdateStatus::Enabled,
6372 closing_signed_in_flight: false,
6374 announcement_sigs: None,
6376 #[cfg(any(test, fuzzing))]
6377 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
6378 #[cfg(any(test, fuzzing))]
6379 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
6381 workaround_lnd_bug_4006: None,
6382 sent_message_awaiting_response: None,
6384 latest_inbound_scid_alias: None,
6385 outbound_scid_alias,
6387 channel_pending_event_emitted: false,
6388 channel_ready_event_emitted: false,
6390 #[cfg(any(test, fuzzing))]
6391 historical_inbound_htlc_fulfills: HashSet::new(),
6396 blocked_monitor_updates: Vec::new(),
6398 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
6402 /// Only allowed after [`ChannelContext::channel_transaction_parameters`] is set.
6403 fn get_funding_created_msg<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
6404 let counterparty_keys = self.context.build_remote_transaction_keys();
6405 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
6406 let signature = match &self.context.holder_signer {
6407 // TODO (taproot|arik): move match into calling method for Taproot
6408 ChannelSignerType::Ecdsa(ecdsa) => {
6409 ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.context.secp_ctx)
6410 .map(|(sig, _)| sig).ok()?
6412 // TODO (taproot|arik)
6417 if self.context.signer_pending_funding {
6418 log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding");
6419 self.context.signer_pending_funding = false;
6422 Some(msgs::FundingCreated {
6423 temporary_channel_id: self.context.temporary_channel_id.unwrap(),
6424 funding_txid: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().txid,
6425 funding_output_index: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().index,
6428 partial_signature_with_nonce: None,
6430 next_local_nonce: None,
6434 /// Updates channel state with knowledge of the funding transaction's txid/index, and generates
6435 /// a funding_created message for the remote peer.
6436 /// Panics if called at some time other than immediately after initial handshake, if called twice,
6437 /// or if called on an inbound channel.
6438 /// Note that channel_id changes during this call!
6439 /// Do NOT broadcast the funding transaction until after a successful funding_signed call!
6440 /// If an Err is returned, it is a ChannelError::Close.
6441 pub fn get_funding_created<L: Deref>(&mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
6442 -> Result<Option<msgs::FundingCreated>, (Self, ChannelError)> where L::Target: Logger {
6443 if !self.context.is_outbound() {
6444 panic!("Tried to create outbound funding_created message on an inbound channel!");
6447 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
6448 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
6450 panic!("Tried to get a funding_created messsage at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
6452 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
6453 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
6454 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6455 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
6458 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
6459 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
6461 // Now that we're past error-generating stuff, update our local state:
6463 self.context.channel_state = ChannelState::FundingNegotiated;
6464 self.context.channel_id = funding_txo.to_channel_id();
6466 // If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
6467 // We can skip this if it is a zero-conf channel.
6468 if funding_transaction.is_coin_base() &&
6469 self.context.minimum_depth.unwrap_or(0) > 0 &&
6470 self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
6471 self.context.minimum_depth = Some(COINBASE_MATURITY);
6474 self.context.funding_transaction = Some(funding_transaction);
6475 self.context.is_batch_funding = Some(()).filter(|_| is_batch_funding);
6477 let funding_created = self.get_funding_created_msg(logger);
6478 if funding_created.is_none() {
6479 #[cfg(not(async_signing))] {
6480 panic!("Failed to get signature for new funding creation");
6482 #[cfg(async_signing)] {
6483 if !self.context.signer_pending_funding {
6484 log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
6485 self.context.signer_pending_funding = true;
6493 fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
6494 // The default channel type (ie the first one we try) depends on whether the channel is
6495 // public - if it is, we just go with `only_static_remotekey` as it's the only option
6496 // available. If it's private, we first try `scid_privacy` as it provides better privacy
6497 // with no other changes, and fall back to `only_static_remotekey`.
6498 let mut ret = ChannelTypeFeatures::only_static_remote_key();
6499 if !config.channel_handshake_config.announced_channel &&
6500 config.channel_handshake_config.negotiate_scid_privacy &&
6501 their_features.supports_scid_privacy() {
6502 ret.set_scid_privacy_required();
6505 // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
6506 // set it now. If they don't understand it, we'll fall back to our default of
6507 // `only_static_remotekey`.
6508 if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
6509 their_features.supports_anchors_zero_fee_htlc_tx() {
6510 ret.set_anchors_zero_fee_htlc_tx_required();
6516 /// If we receive an error message, it may only be a rejection of the channel type we tried,
6517 /// not of our ability to open any channel at all. Thus, on error, we should first call this
6518 /// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
6519 pub(crate) fn maybe_handle_error_without_close<F: Deref>(
6520 &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F>
6521 ) -> Result<msgs::OpenChannel, ()>
6523 F::Target: FeeEstimator
6525 if !self.context.is_outbound() ||
6527 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
6528 if flags == NegotiatingFundingFlags::OUR_INIT_SENT
6533 if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() {
6534 // We've exhausted our options
6537 // We support opening a few different types of channels. Try removing our additional
6538 // features one by one until we've either arrived at our default or the counterparty has
6541 // Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
6542 // counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
6543 // checks whether the counterparty supports every feature, this would only happen if the
6544 // counterparty is advertising the feature, but rejecting channels proposing the feature for
6546 if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
6547 self.context.channel_type.clear_anchors_zero_fee_htlc_tx();
6548 self.context.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
6549 assert!(!self.context.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
6550 } else if self.context.channel_type.supports_scid_privacy() {
6551 self.context.channel_type.clear_scid_privacy();
6553 self.context.channel_type = ChannelTypeFeatures::only_static_remote_key();
6555 self.context.channel_transaction_parameters.channel_type_features = self.context.channel_type.clone();
6556 Ok(self.get_open_channel(chain_hash))
6559 pub fn get_open_channel(&self, chain_hash: ChainHash) -> msgs::OpenChannel {
6560 if !self.context.is_outbound() {
6561 panic!("Tried to open a channel for an inbound channel?");
6563 if self.context.have_received_message() {
6564 panic!("Cannot generate an open_channel after we've moved forward");
6567 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6568 panic!("Tried to send an open_channel for a channel that has already advanced");
6571 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
6572 let keys = self.context.get_holder_pubkeys();
6576 temporary_channel_id: self.context.channel_id,
6577 funding_satoshis: self.context.channel_value_satoshis,
6578 push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat,
6579 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
6580 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
6581 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
6582 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
6583 feerate_per_kw: self.context.feerate_per_kw as u32,
6584 to_self_delay: self.context.get_holder_selected_contest_delay(),
6585 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
6586 funding_pubkey: keys.funding_pubkey,
6587 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
6588 payment_point: keys.payment_point,
6589 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
6590 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
6591 first_per_commitment_point,
6592 channel_flags: if self.context.config.announced_channel {1} else {0},
6593 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
6594 Some(script) => script.clone().into_inner(),
6595 None => Builder::new().into_script(),
6597 channel_type: Some(self.context.channel_type.clone()),
6602 pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
6603 let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };
6605 // Check sanity of message fields:
6606 if !self.context.is_outbound() {
6607 return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
6609 if !matches!(self.context.channel_state, ChannelState::NegotiatingFunding(flags) if flags == NegotiatingFundingFlags::OUR_INIT_SENT) {
6610 return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
6612 if msg.dust_limit_satoshis > 21000000 * 100000000 {
6613 return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.dust_limit_satoshis)));
6615 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
6616 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
6618 if msg.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
6619 return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
6621 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
6622 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
6623 msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
6625 let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
6626 if msg.htlc_minimum_msat >= full_channel_value_msat {
6627 return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
6629 let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
6630 if msg.to_self_delay > max_delay_acceptable {
6631 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.to_self_delay)));
6633 if msg.max_accepted_htlcs < 1 {
6634 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
6636 if msg.max_accepted_htlcs > MAX_HTLCS {
6637 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
6640 // Now check against optional parameters as set by config...
6641 if msg.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
6642 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
6644 if msg.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
6645 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
6647 if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
6648 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
6650 if msg.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
6651 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
6653 if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6654 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6656 if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
6657 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
6659 if msg.minimum_depth > peer_limits.max_minimum_depth {
6660 return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.minimum_depth)));
6663 if let Some(ty) = &msg.channel_type {
6664 if *ty != self.context.channel_type {
6665 return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
6667 } else if their_features.supports_channel_type() {
6668 // Assume they've accepted the channel type as they said they understand it.
6670 let channel_type = ChannelTypeFeatures::from_init(&their_features);
6671 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
6672 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
6674 self.context.channel_type = channel_type.clone();
6675 self.context.channel_transaction_parameters.channel_type_features = channel_type;
6678 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
6679 match &msg.shutdown_scriptpubkey {
6680 &Some(ref script) => {
6681 // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything
6682 if script.len() == 0 {
6685 if !script::is_bolt2_compliant(&script, their_features) {
6686 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
6688 Some(script.clone())
6691 // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
6693 return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
6698 self.context.counterparty_dust_limit_satoshis = msg.dust_limit_satoshis;
6699 self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
6700 self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
6701 self.context.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat;
6702 self.context.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs;
6704 if peer_limits.trust_own_funding_0conf {
6705 self.context.minimum_depth = Some(msg.minimum_depth);
6707 self.context.minimum_depth = Some(cmp::max(1, msg.minimum_depth));
6710 let counterparty_pubkeys = ChannelPublicKeys {
6711 funding_pubkey: msg.funding_pubkey,
6712 revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
6713 payment_point: msg.payment_point,
6714 delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
6715 htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
6718 self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
6719 selected_contest_delay: msg.to_self_delay,
6720 pubkeys: counterparty_pubkeys,
6723 self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point);
6724 self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
6726 self.context.channel_state = ChannelState::NegotiatingFunding(
6727 NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
6729 self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
6734 /// Handles a funding_signed message from the remote end.
6735 /// If this call is successful, broadcast the funding transaction (and not before!)
6736 pub fn funding_signed<L: Deref>(
6737 mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
6738 ) -> Result<(Channel<SP>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (OutboundV1Channel<SP>, ChannelError)>
6742 if !self.context.is_outbound() {
6743 return Err((self, ChannelError::Close("Received funding_signed for an inbound channel?".to_owned())));
6745 if !matches!(self.context.channel_state, ChannelState::FundingNegotiated) {
6746 return Err((self, ChannelError::Close("Received funding_signed in strange state!".to_owned())));
6748 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
6749 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
6750 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6751 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
6754 let funding_script = self.context.get_funding_redeemscript();
6756 let counterparty_keys = self.context.build_remote_transaction_keys();
6757 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
6758 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
6759 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
6761 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
6762 &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
6764 let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
6765 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
6767 let trusted_tx = initial_commitment_tx.trust();
6768 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
6769 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
6770 // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
6771 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
6772 return Err((self, ChannelError::Close("Invalid funding_signed signature from peer".to_owned())));
6776 let holder_commitment_tx = HolderCommitmentTransaction::new(
6777 initial_commitment_tx,
6780 &self.context.get_holder_pubkeys().funding_pubkey,
6781 self.context.counterparty_funding_pubkey()
6785 self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new());
6786 if validated.is_err() {
6787 return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
6790 let funding_redeemscript = self.context.get_funding_redeemscript();
6791 let funding_txo = self.context.get_funding_txo().unwrap();
6792 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
6793 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
6794 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
6795 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
6796 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
6797 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
6798 shutdown_script, self.context.get_holder_selected_contest_delay(),
6799 &self.context.destination_script, (funding_txo, funding_txo_script),
6800 &self.context.channel_transaction_parameters,
6801 funding_redeemscript.clone(), self.context.channel_value_satoshis,
6803 holder_commitment_tx, best_block, self.context.counterparty_node_id);
6804 channel_monitor.provide_initial_counterparty_commitment_tx(
6805 counterparty_initial_bitcoin_tx.txid, Vec::new(),
6806 self.context.cur_counterparty_commitment_transaction_number,
6807 self.context.counterparty_cur_commitment_point.unwrap(),
6808 counterparty_initial_commitment_tx.feerate_per_kw(),
6809 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
6810 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
6812 assert!(!self.context.channel_state.is_monitor_update_in_progress()); // We have no had any monitor(s) yet to fail update!
6813 if self.context.is_batch_funding() {
6814 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH);
6816 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
6818 self.context.cur_holder_commitment_transaction_number -= 1;
6819 self.context.cur_counterparty_commitment_transaction_number -= 1;
6821 log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
6823 let mut channel = Channel { context: self.context };
6825 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
6826 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
6827 Ok((channel, channel_monitor))
6830 /// Indicates that the signer may have some signatures for us, so we should retry if we're
6832 #[cfg(async_signing)]
6833 pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
6834 if self.context.signer_pending_funding && self.context.is_outbound() {
6835 log_trace!(logger, "Signer unblocked a funding_created");
6836 self.get_funding_created_msg(logger)
6841 /// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
6842 pub(super) struct InboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
6843 pub context: ChannelContext<SP>,
6844 pub unfunded_context: UnfundedChannelContext,
6847 impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
6848 /// Creates a new channel from a remote sides' request for one.
6849 /// Assumes chain_hash has already been checked and corresponds with what we expect!
6850 pub fn new<ES: Deref, F: Deref, L: Deref>(
6851 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
6852 counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
6853 their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
6854 current_chain_height: u32, logger: &L, is_0conf: bool,
6855 ) -> Result<InboundV1Channel<SP>, ChannelError>
6856 where ES::Target: EntropySource,
6857 F::Target: FeeEstimator,
6860 let logger = WithContext::from(logger, Some(counterparty_node_id), Some(msg.temporary_channel_id));
6861 let announced_channel = if (msg.channel_flags & 1) == 1 { true } else { false };
6863 // First check the channel type is known, failing before we do anything else if we don't
6864 // support this channel type.
6865 let channel_type = if let Some(channel_type) = &msg.channel_type {
6866 if channel_type.supports_any_optional_bits() {
6867 return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
6870 // We only support the channel types defined by the `ChannelManager` in
6871 // `provided_channel_type_features`. The channel type must always support
6872 // `static_remote_key`.
6873 if !channel_type.requires_static_remote_key() {
6874 return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
6876 // Make sure we support all of the features behind the channel type.
6877 if !channel_type.is_subset(our_supported_features) {
6878 return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
6880 if channel_type.requires_scid_privacy() && announced_channel {
6881 return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
6883 channel_type.clone()
6885 let channel_type = ChannelTypeFeatures::from_init(&their_features);
6886 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
6887 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
6892 let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id);
6893 let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id);
6894 let pubkeys = holder_signer.pubkeys().clone();
6895 let counterparty_pubkeys = ChannelPublicKeys {
6896 funding_pubkey: msg.funding_pubkey,
6897 revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
6898 payment_point: msg.payment_point,
6899 delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
6900 htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
6903 if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
6904 return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
6907 // Check sanity of message fields:
6908 if msg.funding_satoshis > config.channel_handshake_limits.max_funding_satoshis {
6909 return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.channel_handshake_limits.max_funding_satoshis, msg.funding_satoshis)));
6911 if msg.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
6912 return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.funding_satoshis)));
6914 if msg.channel_reserve_satoshis > msg.funding_satoshis {
6915 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must be not greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.funding_satoshis)));
6917 let full_channel_value_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000;
6918 if msg.push_msat > full_channel_value_msat {
6919 return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg.push_msat, full_channel_value_msat)));
6921 if msg.dust_limit_satoshis > msg.funding_satoshis {
6922 return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than funding_satoshis {}. Peer never wants payout outputs?", msg.dust_limit_satoshis, msg.funding_satoshis)));
6924 if msg.htlc_minimum_msat >= full_channel_value_msat {
6925 return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
6927 Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, msg.feerate_per_kw, None, &&logger)?;
6929 let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
6930 if msg.to_self_delay > max_counterparty_selected_contest_delay {
6931 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, msg.to_self_delay)));
6933 if msg.max_accepted_htlcs < 1 {
6934 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
6936 if msg.max_accepted_htlcs > MAX_HTLCS {
6937 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
6940 // Now check against optional parameters as set by config...
6941 if msg.funding_satoshis < config.channel_handshake_limits.min_funding_satoshis {
6942 return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", msg.funding_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
6944 if msg.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
6945 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
6947 if msg.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
6948 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
6950 if msg.channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
6951 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
6953 if msg.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
6954 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
6956 if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6957 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6959 if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
6960 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
6963 // Convert things into internal flags and prep our state:
6965 if config.channel_handshake_limits.force_announced_channel_preference {
6966 if config.channel_handshake_config.announced_channel != announced_channel {
6967 return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
6971 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis, config);
6972 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6973 // Protocol level safety check in place, although it should never happen because
6974 // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
6975 return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6977 if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
6978 return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg.push_msat)));
6980 if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6981 log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
6982 msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
6984 if holder_selected_channel_reserve_satoshis < msg.dust_limit_satoshis {
6985 return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
6988 // check if the funder's amount for the initial commitment tx is sufficient
6989 // for full fee payment plus a few HTLCs to ensure the channel will be useful.
6990 let anchor_outputs_value = if channel_type.supports_anchors_zero_fee_htlc_tx() {
6991 ANCHOR_OUTPUT_VALUE_SATOSHI * 2
6995 let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat;
6996 let commitment_tx_fee = commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000;
6997 if (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value) < commitment_tx_fee {
6998 return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee)));
7001 let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee - anchor_outputs_value;
7002 // While it's reasonable for us to not meet the channel reserve initially (if they don't
7003 // want to push much to us), our counterparty should always have more than our reserve.
7004 if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
7005 return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
7008 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
7009 match &msg.shutdown_scriptpubkey {
7010 &Some(ref script) => {
7011 // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything
7012 if script.len() == 0 {
7015 if !script::is_bolt2_compliant(&script, their_features) {
7016 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
7018 Some(script.clone())
7021 // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
7023 return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
7028 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
7029 match signer_provider.get_shutdown_scriptpubkey() {
7030 Ok(scriptpubkey) => Some(scriptpubkey),
7031 Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
7035 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
7036 if !shutdown_scriptpubkey.is_compatible(&their_features) {
7037 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
7041 let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
7042 Ok(script) => script,
7043 Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
7046 let mut secp_ctx = Secp256k1::new();
7047 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
7049 let minimum_depth = if is_0conf {
7052 Some(cmp::max(config.channel_handshake_config.minimum_depth, 1))
7056 context: ChannelContext {
7059 config: LegacyChannelConfig {
7060 options: config.channel_config.clone(),
7062 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
7067 inbound_handshake_limits_override: None,
7069 temporary_channel_id: Some(msg.temporary_channel_id),
7070 channel_id: msg.temporary_channel_id,
7071 channel_state: ChannelState::NegotiatingFunding(
7072 NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
7074 announcement_sigs_state: AnnouncementSigsState::NotSent,
7077 latest_monitor_update_id: 0,
7079 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
7080 shutdown_scriptpubkey,
7083 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
7084 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
7085 value_to_self_msat: msg.push_msat,
7087 pending_inbound_htlcs: Vec::new(),
7088 pending_outbound_htlcs: Vec::new(),
7089 holding_cell_htlc_updates: Vec::new(),
7090 pending_update_fee: None,
7091 holding_cell_update_fee: None,
7092 next_holder_htlc_id: 0,
7093 next_counterparty_htlc_id: 0,
7094 update_time_counter: 1,
7096 resend_order: RAACommitmentOrder::CommitmentFirst,
7098 monitor_pending_channel_ready: false,
7099 monitor_pending_revoke_and_ack: false,
7100 monitor_pending_commitment_signed: false,
7101 monitor_pending_forwards: Vec::new(),
7102 monitor_pending_failures: Vec::new(),
7103 monitor_pending_finalized_fulfills: Vec::new(),
7105 signer_pending_commitment_update: false,
7106 signer_pending_funding: false,
7108 #[cfg(debug_assertions)]
7109 holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
7110 #[cfg(debug_assertions)]
7111 counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
7113 last_sent_closing_fee: None,
7114 pending_counterparty_closing_signed: None,
7115 expecting_peer_commitment_signed: false,
7116 closing_fee_limits: None,
7117 target_closing_feerate_sats_per_kw: None,
7119 funding_tx_confirmed_in: None,
7120 funding_tx_confirmation_height: 0,
7121 short_channel_id: None,
7122 channel_creation_height: current_chain_height,
7124 feerate_per_kw: msg.feerate_per_kw,
7125 channel_value_satoshis: msg.funding_satoshis,
7126 counterparty_dust_limit_satoshis: msg.dust_limit_satoshis,
7127 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
7128 counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000),
7129 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config),
7130 counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis),
7131 holder_selected_channel_reserve_satoshis,
7132 counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
7133 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
7134 counterparty_max_accepted_htlcs: msg.max_accepted_htlcs,
7135 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
7138 counterparty_forwarding_info: None,
7140 channel_transaction_parameters: ChannelTransactionParameters {
7141 holder_pubkeys: pubkeys,
7142 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
7143 is_outbound_from_holder: false,
7144 counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
7145 selected_contest_delay: msg.to_self_delay,
7146 pubkeys: counterparty_pubkeys,
7148 funding_outpoint: None,
7149 channel_type_features: channel_type.clone()
7151 funding_transaction: None,
7152 is_batch_funding: None,
7154 counterparty_cur_commitment_point: Some(msg.first_per_commitment_point),
7155 counterparty_prev_commitment_point: None,
7156 counterparty_node_id,
7158 counterparty_shutdown_scriptpubkey,
7160 commitment_secrets: CounterpartyCommitmentSecrets::new(),
7162 channel_update_status: ChannelUpdateStatus::Enabled,
7163 closing_signed_in_flight: false,
7165 announcement_sigs: None,
7167 #[cfg(any(test, fuzzing))]
7168 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
7169 #[cfg(any(test, fuzzing))]
7170 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
7172 workaround_lnd_bug_4006: None,
7173 sent_message_awaiting_response: None,
7175 latest_inbound_scid_alias: None,
7176 outbound_scid_alias: 0,
7178 channel_pending_event_emitted: false,
7179 channel_ready_event_emitted: false,
7181 #[cfg(any(test, fuzzing))]
7182 historical_inbound_htlc_fulfills: HashSet::new(),
7187 blocked_monitor_updates: Vec::new(),
7189 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
7195 /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
7196 /// should be sent back to the counterparty node.
7198 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7199 pub fn accept_inbound_channel(&mut self) -> msgs::AcceptChannel {
7200 if self.context.is_outbound() {
7201 panic!("Tried to send accept_channel for an outbound channel?");
7204 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
7205 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
7207 panic!("Tried to send accept_channel after channel had moved forward");
7209 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7210 panic!("Tried to send an accept_channel for a channel that has already advanced");
7213 self.generate_accept_channel_message()
7216 /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
7217 /// inbound channel. If the intention is to accept an inbound channel, use
7218 /// [`InboundV1Channel::accept_inbound_channel`] instead.
7220 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7221 fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
7222 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
7223 let keys = self.context.get_holder_pubkeys();
7225 msgs::AcceptChannel {
7226 temporary_channel_id: self.context.channel_id,
7227 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
7228 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
7229 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
7230 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
7231 minimum_depth: self.context.minimum_depth.unwrap(),
7232 to_self_delay: self.context.get_holder_selected_contest_delay(),
7233 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
7234 funding_pubkey: keys.funding_pubkey,
7235 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
7236 payment_point: keys.payment_point,
7237 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
7238 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
7239 first_per_commitment_point,
7240 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
7241 Some(script) => script.clone().into_inner(),
7242 None => Builder::new().into_script(),
7244 channel_type: Some(self.context.channel_type.clone()),
7246 next_local_nonce: None,
7250 /// Enables the possibility for tests to extract a [`msgs::AcceptChannel`] message for an
7251 /// inbound channel without accepting it.
7253 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7255 pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel {
7256 self.generate_accept_channel_message()
7259 fn check_funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<CommitmentTransaction, ChannelError> where L::Target: Logger {
7260 let funding_script = self.context.get_funding_redeemscript();
7262 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
7263 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
7264 let trusted_tx = initial_commitment_tx.trust();
7265 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
7266 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
7267 // They sign the holder commitment transaction...
7268 log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
7269 log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
7270 encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
7271 encode::serialize_hex(&funding_script), &self.context.channel_id());
7272 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
7274 Ok(initial_commitment_tx)
7277 pub fn funding_created<L: Deref>(
7278 mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
7279 ) -> Result<(Channel<SP>, Option<msgs::FundingSigned>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (Self, ChannelError)>
7283 if self.context.is_outbound() {
7284 return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned())));
7287 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
7288 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
7290 // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
7291 // remember the channel, so it's safe to just send an error_message here and drop the
7293 return Err((self, ChannelError::Close("Received funding_created after we got the channel!".to_owned())));
7295 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
7296 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
7297 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7298 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
7301 let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
7302 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
7303 // This is an externally observable change before we finish all our checks. In particular
7304 // check_funding_created_signature may fail.
7305 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
7307 let initial_commitment_tx = match self.check_funding_created_signature(&msg.signature, logger) {
7309 Err(ChannelError::Close(e)) => {
7310 self.context.channel_transaction_parameters.funding_outpoint = None;
7311 return Err((self, ChannelError::Close(e)));
7314 // The only error we know how to handle is ChannelError::Close, so we fall over here
7315 // to make sure we don't continue with an inconsistent state.
7316 panic!("unexpected error type from check_funding_created_signature {:?}", e);
7320 let holder_commitment_tx = HolderCommitmentTransaction::new(
7321 initial_commitment_tx,
7324 &self.context.get_holder_pubkeys().funding_pubkey,
7325 self.context.counterparty_funding_pubkey()
7328 if let Err(_) = self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new()) {
7329 return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
7332 // Now that we're past error-generating stuff, update our local state:
7334 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
7335 self.context.channel_id = funding_txo.to_channel_id();
7336 self.context.cur_counterparty_commitment_transaction_number -= 1;
7337 self.context.cur_holder_commitment_transaction_number -= 1;
7339 let (counterparty_initial_commitment_tx, funding_signed) = self.context.get_funding_signed_msg(logger);
7341 let funding_redeemscript = self.context.get_funding_redeemscript();
7342 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
7343 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
7344 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
7345 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
7346 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
7347 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
7348 shutdown_script, self.context.get_holder_selected_contest_delay(),
7349 &self.context.destination_script, (funding_txo, funding_txo_script.clone()),
7350 &self.context.channel_transaction_parameters,
7351 funding_redeemscript.clone(), self.context.channel_value_satoshis,
7353 holder_commitment_tx, best_block, self.context.counterparty_node_id);
7354 channel_monitor.provide_initial_counterparty_commitment_tx(
7355 counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
7356 self.context.cur_counterparty_commitment_transaction_number + 1,
7357 self.context.counterparty_cur_commitment_point.unwrap(), self.context.feerate_per_kw,
7358 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
7359 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
7361 log_info!(logger, "{} funding_signed for peer for channel {}",
7362 if funding_signed.is_some() { "Generated" } else { "Waiting for signature on" }, &self.context.channel_id());
7364 // Promote the channel to a full-fledged one now that we have updated the state and have a
7365 // `ChannelMonitor`.
7366 let mut channel = Channel {
7367 context: self.context,
7369 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
7370 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
7372 Ok((channel, funding_signed, channel_monitor))
7376 const SERIALIZATION_VERSION: u8 = 3;
7377 const MIN_SERIALIZATION_VERSION: u8 = 3;
7379 impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
7385 impl Writeable for ChannelUpdateStatus {
7386 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7387 // We only care about writing out the current state as it was announced, ie only either
7388 // Enabled or Disabled. In the case of DisabledStaged, we most recently announced the
7389 // channel as enabled, so we write 0. For EnabledStaged, we similarly write a 1.
7391 ChannelUpdateStatus::Enabled => 0u8.write(writer)?,
7392 ChannelUpdateStatus::DisabledStaged(_) => 0u8.write(writer)?,
7393 ChannelUpdateStatus::EnabledStaged(_) => 1u8.write(writer)?,
7394 ChannelUpdateStatus::Disabled => 1u8.write(writer)?,
7400 impl Readable for ChannelUpdateStatus {
7401 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
7402 Ok(match <u8 as Readable>::read(reader)? {
7403 0 => ChannelUpdateStatus::Enabled,
7404 1 => ChannelUpdateStatus::Disabled,
7405 _ => return Err(DecodeError::InvalidValue),
7410 impl Writeable for AnnouncementSigsState {
7411 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7412 // We only care about writing out the current state as if we had just disconnected, at
7413 // which point we always set anything but AnnouncementSigsReceived to NotSent.
7415 AnnouncementSigsState::NotSent => 0u8.write(writer),
7416 AnnouncementSigsState::MessageSent => 0u8.write(writer),
7417 AnnouncementSigsState::Committed => 0u8.write(writer),
7418 AnnouncementSigsState::PeerReceived => 1u8.write(writer),
7423 impl Readable for AnnouncementSigsState {
7424 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
7425 Ok(match <u8 as Readable>::read(reader)? {
7426 0 => AnnouncementSigsState::NotSent,
7427 1 => AnnouncementSigsState::PeerReceived,
7428 _ => return Err(DecodeError::InvalidValue),
7433 impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
7434 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7435 // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been
7438 write_ver_prefix!(writer, MIN_SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
7440 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7441 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write
7442 // the low bytes now and the optional high bytes later.
7443 let user_id_low = self.context.user_id as u64;
7444 user_id_low.write(writer)?;
7446 // Version 1 deserializers expected to read parts of the config object here. Version 2
7447 // deserializers (0.0.99) now read config through TLVs, and as we now require them for
7448 // `minimum_depth` we simply write dummy values here.
7449 writer.write_all(&[0; 8])?;
7451 self.context.channel_id.write(writer)?;
7453 let mut channel_state = self.context.channel_state;
7454 if matches!(channel_state, ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)) {
7455 channel_state.set_peer_disconnected();
7457 channel_state.to_u32().write(writer)?;
7459 self.context.channel_value_satoshis.write(writer)?;
7461 self.context.latest_monitor_update_id.write(writer)?;
7463 // Write out the old serialization for shutdown_pubkey for backwards compatibility, if
7464 // deserialized from that format.
7465 match self.context.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) {
7466 Some(shutdown_pubkey) => shutdown_pubkey.write(writer)?,
7467 None => [0u8; PUBLIC_KEY_SIZE].write(writer)?,
7469 self.context.destination_script.write(writer)?;
7471 self.context.cur_holder_commitment_transaction_number.write(writer)?;
7472 self.context.cur_counterparty_commitment_transaction_number.write(writer)?;
7473 self.context.value_to_self_msat.write(writer)?;
7475 let mut dropped_inbound_htlcs = 0;
7476 for htlc in self.context.pending_inbound_htlcs.iter() {
7477 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
7478 dropped_inbound_htlcs += 1;
7481 (self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?;
7482 for htlc in self.context.pending_inbound_htlcs.iter() {
7483 if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state {
7486 htlc.htlc_id.write(writer)?;
7487 htlc.amount_msat.write(writer)?;
7488 htlc.cltv_expiry.write(writer)?;
7489 htlc.payment_hash.write(writer)?;
7491 &InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
7492 &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_state) => {
7494 htlc_state.write(writer)?;
7496 &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_state) => {
7498 htlc_state.write(writer)?;
7500 &InboundHTLCState::Committed => {
7503 &InboundHTLCState::LocalRemoved(ref removal_reason) => {
7505 removal_reason.write(writer)?;
7510 let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];
7511 let mut pending_outbound_skimmed_fees: Vec<Option<u64>> = Vec::new();
7512 let mut pending_outbound_blinding_points: Vec<Option<PublicKey>> = Vec::new();
7514 (self.context.pending_outbound_htlcs.len() as u64).write(writer)?;
7515 for htlc in self.context.pending_outbound_htlcs.iter() {
7516 htlc.htlc_id.write(writer)?;
7517 htlc.amount_msat.write(writer)?;
7518 htlc.cltv_expiry.write(writer)?;
7519 htlc.payment_hash.write(writer)?;
7520 htlc.source.write(writer)?;
7522 &OutboundHTLCState::LocalAnnounced(ref onion_packet) => {
7524 onion_packet.write(writer)?;
7526 &OutboundHTLCState::Committed => {
7529 &OutboundHTLCState::RemoteRemoved(_) => {
7530 // Treat this as a Committed because we haven't received the CS - they'll
7531 // resend the claim/fail on reconnect as we all (hopefully) the missing CS.
7534 &OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref outcome) => {
7536 if let OutboundHTLCOutcome::Success(preimage) = outcome {
7537 preimages.push(preimage);
7539 let reason: Option<&HTLCFailReason> = outcome.into();
7540 reason.write(writer)?;
7542 &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) => {
7544 if let OutboundHTLCOutcome::Success(preimage) = outcome {
7545 preimages.push(preimage);
7547 let reason: Option<&HTLCFailReason> = outcome.into();
7548 reason.write(writer)?;
7551 pending_outbound_skimmed_fees.push(htlc.skimmed_fee_msat);
7552 pending_outbound_blinding_points.push(htlc.blinding_point);
7555 let mut holding_cell_skimmed_fees: Vec<Option<u64>> = Vec::new();
7556 let mut holding_cell_blinding_points: Vec<Option<PublicKey>> = Vec::new();
7557 // Vec of (htlc_id, failure_code, sha256_of_onion)
7558 let mut malformed_htlcs: Vec<(u64, u16, [u8; 32])> = Vec::new();
7559 (self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
7560 for update in self.context.holding_cell_htlc_updates.iter() {
7562 &HTLCUpdateAwaitingACK::AddHTLC {
7563 ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
7564 blinding_point, skimmed_fee_msat,
7567 amount_msat.write(writer)?;
7568 cltv_expiry.write(writer)?;
7569 payment_hash.write(writer)?;
7570 source.write(writer)?;
7571 onion_routing_packet.write(writer)?;
7573 holding_cell_skimmed_fees.push(skimmed_fee_msat);
7574 holding_cell_blinding_points.push(blinding_point);
7576 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
7578 payment_preimage.write(writer)?;
7579 htlc_id.write(writer)?;
7581 &HTLCUpdateAwaitingACK::FailHTLC { ref htlc_id, ref err_packet } => {
7583 htlc_id.write(writer)?;
7584 err_packet.write(writer)?;
7586 &HTLCUpdateAwaitingACK::FailMalformedHTLC {
7587 htlc_id, failure_code, sha256_of_onion
7589 // We don't want to break downgrading by adding a new variant, so write a dummy
7590 // `::FailHTLC` variant and write the real malformed error as an optional TLV.
7591 malformed_htlcs.push((htlc_id, failure_code, sha256_of_onion));
7593 let dummy_err_packet = msgs::OnionErrorPacket { data: Vec::new() };
7595 htlc_id.write(writer)?;
7596 dummy_err_packet.write(writer)?;
7601 match self.context.resend_order {
7602 RAACommitmentOrder::CommitmentFirst => 0u8.write(writer)?,
7603 RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?,
7606 self.context.monitor_pending_channel_ready.write(writer)?;
7607 self.context.monitor_pending_revoke_and_ack.write(writer)?;
7608 self.context.monitor_pending_commitment_signed.write(writer)?;
7610 (self.context.monitor_pending_forwards.len() as u64).write(writer)?;
7611 for &(ref pending_forward, ref htlc_id) in self.context.monitor_pending_forwards.iter() {
7612 pending_forward.write(writer)?;
7613 htlc_id.write(writer)?;
7616 (self.context.monitor_pending_failures.len() as u64).write(writer)?;
7617 for &(ref htlc_source, ref payment_hash, ref fail_reason) in self.context.monitor_pending_failures.iter() {
7618 htlc_source.write(writer)?;
7619 payment_hash.write(writer)?;
7620 fail_reason.write(writer)?;
7623 if self.context.is_outbound() {
7624 self.context.pending_update_fee.map(|(a, _)| a).write(writer)?;
7625 } else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.context.pending_update_fee {
7626 Some(feerate).write(writer)?;
7628 // As for inbound HTLCs, if the update was only announced and never committed in a
7629 // commitment_signed, drop it.
7630 None::<u32>.write(writer)?;
7632 self.context.holding_cell_update_fee.write(writer)?;
7634 self.context.next_holder_htlc_id.write(writer)?;
7635 (self.context.next_counterparty_htlc_id - dropped_inbound_htlcs).write(writer)?;
7636 self.context.update_time_counter.write(writer)?;
7637 self.context.feerate_per_kw.write(writer)?;
7639 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7640 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7641 // `last_send_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7642 // consider the stale state on reload.
7645 self.context.funding_tx_confirmed_in.write(writer)?;
7646 self.context.funding_tx_confirmation_height.write(writer)?;
7647 self.context.short_channel_id.write(writer)?;
7649 self.context.counterparty_dust_limit_satoshis.write(writer)?;
7650 self.context.holder_dust_limit_satoshis.write(writer)?;
7651 self.context.counterparty_max_htlc_value_in_flight_msat.write(writer)?;
7653 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7654 self.context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0).write(writer)?;
7656 self.context.counterparty_htlc_minimum_msat.write(writer)?;
7657 self.context.holder_htlc_minimum_msat.write(writer)?;
7658 self.context.counterparty_max_accepted_htlcs.write(writer)?;
7660 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7661 self.context.minimum_depth.unwrap_or(0).write(writer)?;
7663 match &self.context.counterparty_forwarding_info {
7666 info.fee_base_msat.write(writer)?;
7667 info.fee_proportional_millionths.write(writer)?;
7668 info.cltv_expiry_delta.write(writer)?;
7670 None => 0u8.write(writer)?
7673 self.context.channel_transaction_parameters.write(writer)?;
7674 self.context.funding_transaction.write(writer)?;
7676 self.context.counterparty_cur_commitment_point.write(writer)?;
7677 self.context.counterparty_prev_commitment_point.write(writer)?;
7678 self.context.counterparty_node_id.write(writer)?;
7680 self.context.counterparty_shutdown_scriptpubkey.write(writer)?;
7682 self.context.commitment_secrets.write(writer)?;
7684 self.context.channel_update_status.write(writer)?;
7686 #[cfg(any(test, fuzzing))]
7687 (self.context.historical_inbound_htlc_fulfills.len() as u64).write(writer)?;
7688 #[cfg(any(test, fuzzing))]
7689 for htlc in self.context.historical_inbound_htlc_fulfills.iter() {
7690 htlc.write(writer)?;
7693 // If the channel type is something other than only-static-remote-key, then we need to have
7694 // older clients fail to deserialize this channel at all. If the type is
7695 // only-static-remote-key, we simply consider it "default" and don't write the channel type
7697 let chan_type = if self.context.channel_type != ChannelTypeFeatures::only_static_remote_key() {
7698 Some(&self.context.channel_type) } else { None };
7700 // The same logic applies for `holder_selected_channel_reserve_satoshis` values other than
7701 // the default, and when `holder_max_htlc_value_in_flight_msat` is configured to be set to
7702 // a different percentage of the channel value then 10%, which older versions of LDK used
7703 // to set it to before the percentage was made configurable.
7704 let serialized_holder_selected_reserve =
7705 if self.context.holder_selected_channel_reserve_satoshis != get_legacy_default_holder_selected_channel_reserve_satoshis(self.context.channel_value_satoshis)
7706 { Some(self.context.holder_selected_channel_reserve_satoshis) } else { None };
7708 let mut old_max_in_flight_percent_config = UserConfig::default().channel_handshake_config;
7709 old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = MAX_IN_FLIGHT_PERCENT_LEGACY;
7710 let serialized_holder_htlc_max_in_flight =
7711 if self.context.holder_max_htlc_value_in_flight_msat != get_holder_max_htlc_value_in_flight_msat(self.context.channel_value_satoshis, &old_max_in_flight_percent_config)
7712 { Some(self.context.holder_max_htlc_value_in_flight_msat) } else { None };
7714 let channel_pending_event_emitted = Some(self.context.channel_pending_event_emitted);
7715 let channel_ready_event_emitted = Some(self.context.channel_ready_event_emitted);
7717 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7718 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. Therefore,
7719 // we write the high bytes as an option here.
7720 let user_id_high_opt = Some((self.context.user_id >> 64) as u64);
7722 let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.context.holder_max_accepted_htlcs) };
7724 write_tlv_fields!(writer, {
7725 (0, self.context.announcement_sigs, option),
7726 // minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
7727 // default value instead of being Option<>al. Thus, to maintain compatibility we write
7728 // them twice, once with their original default values above, and once as an option
7729 // here. On the read side, old versions will simply ignore the odd-type entries here,
7730 // and new versions map the default values to None and allow the TLV entries here to
7732 (1, self.context.minimum_depth, option),
7733 (2, chan_type, option),
7734 (3, self.context.counterparty_selected_channel_reserve_satoshis, option),
7735 (4, serialized_holder_selected_reserve, option),
7736 (5, self.context.config, required),
7737 (6, serialized_holder_htlc_max_in_flight, option),
7738 (7, self.context.shutdown_scriptpubkey, option),
7739 (8, self.context.blocked_monitor_updates, optional_vec),
7740 (9, self.context.target_closing_feerate_sats_per_kw, option),
7741 (11, self.context.monitor_pending_finalized_fulfills, required_vec),
7742 (13, self.context.channel_creation_height, required),
7743 (15, preimages, required_vec),
7744 (17, self.context.announcement_sigs_state, required),
7745 (19, self.context.latest_inbound_scid_alias, option),
7746 (21, self.context.outbound_scid_alias, required),
7747 (23, channel_ready_event_emitted, option),
7748 (25, user_id_high_opt, option),
7749 (27, self.context.channel_keys_id, required),
7750 (28, holder_max_accepted_htlcs, option),
7751 (29, self.context.temporary_channel_id, option),
7752 (31, channel_pending_event_emitted, option),
7753 (35, pending_outbound_skimmed_fees, optional_vec),
7754 (37, holding_cell_skimmed_fees, optional_vec),
7755 (38, self.context.is_batch_funding, option),
7756 (39, pending_outbound_blinding_points, optional_vec),
7757 (41, holding_cell_blinding_points, optional_vec),
7758 (43, malformed_htlcs, optional_vec), // Added in 0.0.119
7765 const MAX_ALLOC_SIZE: usize = 64*1024;
7766 impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel<SP>
7768 ES::Target: EntropySource,
7769 SP::Target: SignerProvider
7771 fn read<R : io::Read>(reader: &mut R, args: (&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)) -> Result<Self, DecodeError> {
7772 let (entropy_source, signer_provider, serialized_height, our_supported_features) = args;
7773 let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
7775 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7776 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We read
7777 // the low bytes now and the high bytes later.
7778 let user_id_low: u64 = Readable::read(reader)?;
7780 let mut config = Some(LegacyChannelConfig::default());
7782 // Read the old serialization of the ChannelConfig from version 0.0.98.
7783 config.as_mut().unwrap().options.forwarding_fee_proportional_millionths = Readable::read(reader)?;
7784 config.as_mut().unwrap().options.cltv_expiry_delta = Readable::read(reader)?;
7785 config.as_mut().unwrap().announced_channel = Readable::read(reader)?;
7786 config.as_mut().unwrap().commit_upfront_shutdown_pubkey = Readable::read(reader)?;
7788 // Read the 8 bytes of backwards-compatibility ChannelConfig data.
7789 let mut _val: u64 = Readable::read(reader)?;
7792 let channel_id = Readable::read(reader)?;
7793 let channel_state = ChannelState::from_u32(Readable::read(reader)?).map_err(|_| DecodeError::InvalidValue)?;
7794 let channel_value_satoshis = Readable::read(reader)?;
7796 let latest_monitor_update_id = Readable::read(reader)?;
7798 let mut keys_data = None;
7800 // Read the serialize signer bytes. We'll choose to deserialize them or not based on whether
7801 // the `channel_keys_id` TLV is present below.
7802 let keys_len: u32 = Readable::read(reader)?;
7803 keys_data = Some(Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE)));
7804 while keys_data.as_ref().unwrap().len() != keys_len as usize {
7805 // Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
7806 let mut data = [0; 1024];
7807 let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.as_ref().unwrap().len())];
7808 reader.read_exact(read_slice)?;
7809 keys_data.as_mut().unwrap().extend_from_slice(read_slice);
7813 // Read the old serialization for shutdown_pubkey, preferring the TLV field later if set.
7814 let mut shutdown_scriptpubkey = match <PublicKey as Readable>::read(reader) {
7815 Ok(pubkey) => Some(ShutdownScript::new_p2wpkh_from_pubkey(pubkey)),
7818 let destination_script = Readable::read(reader)?;
7820 let cur_holder_commitment_transaction_number = Readable::read(reader)?;
7821 let cur_counterparty_commitment_transaction_number = Readable::read(reader)?;
7822 let value_to_self_msat = Readable::read(reader)?;
7824 let pending_inbound_htlc_count: u64 = Readable::read(reader)?;
7826 let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7827 for _ in 0..pending_inbound_htlc_count {
7828 pending_inbound_htlcs.push(InboundHTLCOutput {
7829 htlc_id: Readable::read(reader)?,
7830 amount_msat: Readable::read(reader)?,
7831 cltv_expiry: Readable::read(reader)?,
7832 payment_hash: Readable::read(reader)?,
7833 state: match <u8 as Readable>::read(reader)? {
7834 1 => InboundHTLCState::AwaitingRemoteRevokeToAnnounce(Readable::read(reader)?),
7835 2 => InboundHTLCState::AwaitingAnnouncedRemoteRevoke(Readable::read(reader)?),
7836 3 => InboundHTLCState::Committed,
7837 4 => InboundHTLCState::LocalRemoved(Readable::read(reader)?),
7838 _ => return Err(DecodeError::InvalidValue),
7843 let pending_outbound_htlc_count: u64 = Readable::read(reader)?;
7844 let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7845 for _ in 0..pending_outbound_htlc_count {
7846 pending_outbound_htlcs.push(OutboundHTLCOutput {
7847 htlc_id: Readable::read(reader)?,
7848 amount_msat: Readable::read(reader)?,
7849 cltv_expiry: Readable::read(reader)?,
7850 payment_hash: Readable::read(reader)?,
7851 source: Readable::read(reader)?,
7852 state: match <u8 as Readable>::read(reader)? {
7853 0 => OutboundHTLCState::LocalAnnounced(Box::new(Readable::read(reader)?)),
7854 1 => OutboundHTLCState::Committed,
7856 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7857 OutboundHTLCState::RemoteRemoved(option.into())
7860 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7861 OutboundHTLCState::AwaitingRemoteRevokeToRemove(option.into())
7864 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7865 OutboundHTLCState::AwaitingRemovedRemoteRevoke(option.into())
7867 _ => return Err(DecodeError::InvalidValue),
7869 skimmed_fee_msat: None,
7870 blinding_point: None,
7874 let holding_cell_htlc_update_count: u64 = Readable::read(reader)?;
7875 let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, DEFAULT_MAX_HTLCS as usize*2));
7876 for _ in 0..holding_cell_htlc_update_count {
7877 holding_cell_htlc_updates.push(match <u8 as Readable>::read(reader)? {
7878 0 => HTLCUpdateAwaitingACK::AddHTLC {
7879 amount_msat: Readable::read(reader)?,
7880 cltv_expiry: Readable::read(reader)?,
7881 payment_hash: Readable::read(reader)?,
7882 source: Readable::read(reader)?,
7883 onion_routing_packet: Readable::read(reader)?,
7884 skimmed_fee_msat: None,
7885 blinding_point: None,
7887 1 => HTLCUpdateAwaitingACK::ClaimHTLC {
7888 payment_preimage: Readable::read(reader)?,
7889 htlc_id: Readable::read(reader)?,
7891 2 => HTLCUpdateAwaitingACK::FailHTLC {
7892 htlc_id: Readable::read(reader)?,
7893 err_packet: Readable::read(reader)?,
7895 _ => return Err(DecodeError::InvalidValue),
7899 let resend_order = match <u8 as Readable>::read(reader)? {
7900 0 => RAACommitmentOrder::CommitmentFirst,
7901 1 => RAACommitmentOrder::RevokeAndACKFirst,
7902 _ => return Err(DecodeError::InvalidValue),
7905 let monitor_pending_channel_ready = Readable::read(reader)?;
7906 let monitor_pending_revoke_and_ack = Readable::read(reader)?;
7907 let monitor_pending_commitment_signed = Readable::read(reader)?;
7909 let monitor_pending_forwards_count: u64 = Readable::read(reader)?;
7910 let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, DEFAULT_MAX_HTLCS as usize));
7911 for _ in 0..monitor_pending_forwards_count {
7912 monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?));
7915 let monitor_pending_failures_count: u64 = Readable::read(reader)?;
7916 let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, DEFAULT_MAX_HTLCS as usize));
7917 for _ in 0..monitor_pending_failures_count {
7918 monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?));
7921 let pending_update_fee_value: Option<u32> = Readable::read(reader)?;
7923 let holding_cell_update_fee = Readable::read(reader)?;
7925 let next_holder_htlc_id = Readable::read(reader)?;
7926 let next_counterparty_htlc_id = Readable::read(reader)?;
7927 let update_time_counter = Readable::read(reader)?;
7928 let feerate_per_kw = Readable::read(reader)?;
7930 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7931 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7932 // `last_send_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7933 // consider the stale state on reload.
7934 match <u8 as Readable>::read(reader)? {
7937 let _: u32 = Readable::read(reader)?;
7938 let _: u64 = Readable::read(reader)?;
7939 let _: Signature = Readable::read(reader)?;
7941 _ => return Err(DecodeError::InvalidValue),
7944 let funding_tx_confirmed_in = Readable::read(reader)?;
7945 let funding_tx_confirmation_height = Readable::read(reader)?;
7946 let short_channel_id = Readable::read(reader)?;
7948 let counterparty_dust_limit_satoshis = Readable::read(reader)?;
7949 let holder_dust_limit_satoshis = Readable::read(reader)?;
7950 let counterparty_max_htlc_value_in_flight_msat = Readable::read(reader)?;
7951 let mut counterparty_selected_channel_reserve_satoshis = None;
7953 // Read the old serialization from version 0.0.98.
7954 counterparty_selected_channel_reserve_satoshis = Some(Readable::read(reader)?);
7956 // Read the 8 bytes of backwards-compatibility data.
7957 let _dummy: u64 = Readable::read(reader)?;
7959 let counterparty_htlc_minimum_msat = Readable::read(reader)?;
7960 let holder_htlc_minimum_msat = Readable::read(reader)?;
7961 let counterparty_max_accepted_htlcs = Readable::read(reader)?;
7963 let mut minimum_depth = None;
7965 // Read the old serialization from version 0.0.98.
7966 minimum_depth = Some(Readable::read(reader)?);
7968 // Read the 4 bytes of backwards-compatibility data.
7969 let _dummy: u32 = Readable::read(reader)?;
7972 let counterparty_forwarding_info = match <u8 as Readable>::read(reader)? {
7974 1 => Some(CounterpartyForwardingInfo {
7975 fee_base_msat: Readable::read(reader)?,
7976 fee_proportional_millionths: Readable::read(reader)?,
7977 cltv_expiry_delta: Readable::read(reader)?,
7979 _ => return Err(DecodeError::InvalidValue),
7982 let mut channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
7983 let funding_transaction: Option<Transaction> = Readable::read(reader)?;
7985 let counterparty_cur_commitment_point = Readable::read(reader)?;
7987 let counterparty_prev_commitment_point = Readable::read(reader)?;
7988 let counterparty_node_id = Readable::read(reader)?;
7990 let counterparty_shutdown_scriptpubkey = Readable::read(reader)?;
7991 let commitment_secrets = Readable::read(reader)?;
7993 let channel_update_status = Readable::read(reader)?;
7995 #[cfg(any(test, fuzzing))]
7996 let mut historical_inbound_htlc_fulfills = HashSet::new();
7997 #[cfg(any(test, fuzzing))]
7999 let htlc_fulfills_len: u64 = Readable::read(reader)?;
8000 for _ in 0..htlc_fulfills_len {
8001 assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?));
8005 let pending_update_fee = if let Some(feerate) = pending_update_fee_value {
8006 Some((feerate, if channel_parameters.is_outbound_from_holder {
8007 FeeUpdateState::Outbound
8009 FeeUpdateState::AwaitingRemoteRevokeToAnnounce
8015 let mut announcement_sigs = None;
8016 let mut target_closing_feerate_sats_per_kw = None;
8017 let mut monitor_pending_finalized_fulfills = Some(Vec::new());
8018 let mut holder_selected_channel_reserve_satoshis = Some(get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis));
8019 let mut holder_max_htlc_value_in_flight_msat = Some(get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().channel_handshake_config));
8020 // Prior to supporting channel type negotiation, all of our channels were static_remotekey
8021 // only, so we default to that if none was written.
8022 let mut channel_type = Some(ChannelTypeFeatures::only_static_remote_key());
8023 let mut channel_creation_height = Some(serialized_height);
8024 let mut preimages_opt: Option<Vec<Option<PaymentPreimage>>> = None;
8026 // If we read an old Channel, for simplicity we just treat it as "we never sent an
8027 // AnnouncementSignatures" which implies we'll re-send it on reconnect, but that's fine.
8028 let mut announcement_sigs_state = Some(AnnouncementSigsState::NotSent);
8029 let mut latest_inbound_scid_alias = None;
8030 let mut outbound_scid_alias = None;
8031 let mut channel_pending_event_emitted = None;
8032 let mut channel_ready_event_emitted = None;
8034 let mut user_id_high_opt: Option<u64> = None;
8035 let mut channel_keys_id: Option<[u8; 32]> = None;
8036 let mut temporary_channel_id: Option<ChannelId> = None;
8037 let mut holder_max_accepted_htlcs: Option<u16> = None;
8039 let mut blocked_monitor_updates = Some(Vec::new());
8041 let mut pending_outbound_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
8042 let mut holding_cell_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
8044 let mut is_batch_funding: Option<()> = None;
8046 let mut pending_outbound_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
8047 let mut holding_cell_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
8049 let mut malformed_htlcs: Option<Vec<(u64, u16, [u8; 32])>> = None;
8051 read_tlv_fields!(reader, {
8052 (0, announcement_sigs, option),
8053 (1, minimum_depth, option),
8054 (2, channel_type, option),
8055 (3, counterparty_selected_channel_reserve_satoshis, option),
8056 (4, holder_selected_channel_reserve_satoshis, option),
8057 (5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
8058 (6, holder_max_htlc_value_in_flight_msat, option),
8059 (7, shutdown_scriptpubkey, option),
8060 (8, blocked_monitor_updates, optional_vec),
8061 (9, target_closing_feerate_sats_per_kw, option),
8062 (11, monitor_pending_finalized_fulfills, optional_vec),
8063 (13, channel_creation_height, option),
8064 (15, preimages_opt, optional_vec),
8065 (17, announcement_sigs_state, option),
8066 (19, latest_inbound_scid_alias, option),
8067 (21, outbound_scid_alias, option),
8068 (23, channel_ready_event_emitted, option),
8069 (25, user_id_high_opt, option),
8070 (27, channel_keys_id, option),
8071 (28, holder_max_accepted_htlcs, option),
8072 (29, temporary_channel_id, option),
8073 (31, channel_pending_event_emitted, option),
8074 (35, pending_outbound_skimmed_fees_opt, optional_vec),
8075 (37, holding_cell_skimmed_fees_opt, optional_vec),
8076 (38, is_batch_funding, option),
8077 (39, pending_outbound_blinding_points_opt, optional_vec),
8078 (41, holding_cell_blinding_points_opt, optional_vec),
8079 (43, malformed_htlcs, optional_vec), // Added in 0.0.119
8082 let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
8083 let mut holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
8084 // If we've gotten to the funding stage of the channel, populate the signer with its
8085 // required channel parameters.
8086 if channel_state >= ChannelState::FundingNegotiated {
8087 holder_signer.provide_channel_parameters(&channel_parameters);
8089 (channel_keys_id, holder_signer)
8091 // `keys_data` can be `None` if we had corrupted data.
8092 let keys_data = keys_data.ok_or(DecodeError::InvalidValue)?;
8093 let holder_signer = signer_provider.read_chan_signer(&keys_data)?;
8094 (holder_signer.channel_keys_id(), holder_signer)
8097 if let Some(preimages) = preimages_opt {
8098 let mut iter = preimages.into_iter();
8099 for htlc in pending_outbound_htlcs.iter_mut() {
8101 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(None)) => {
8102 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
8104 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(None)) => {
8105 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
8110 // We expect all preimages to be consumed above
8111 if iter.next().is_some() {
8112 return Err(DecodeError::InvalidValue);
8116 let chan_features = channel_type.as_ref().unwrap();
8117 if !chan_features.is_subset(our_supported_features) {
8118 // If the channel was written by a new version and negotiated with features we don't
8119 // understand yet, refuse to read it.
8120 return Err(DecodeError::UnknownRequiredFeature);
8123 // ChannelTransactionParameters may have had an empty features set upon deserialization.
8124 // To account for that, we're proactively setting/overriding the field here.
8125 channel_parameters.channel_type_features = chan_features.clone();
8127 let mut secp_ctx = Secp256k1::new();
8128 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
8130 // `user_id` used to be a single u64 value. In order to remain backwards
8131 // compatible with versions prior to 0.0.113, the u128 is serialized as two
8132 // separate u64 values.
8133 let user_id = user_id_low as u128 + ((user_id_high_opt.unwrap_or(0) as u128) << 64);
8135 let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);
8137 if let Some(skimmed_fees) = pending_outbound_skimmed_fees_opt {
8138 let mut iter = skimmed_fees.into_iter();
8139 for htlc in pending_outbound_htlcs.iter_mut() {
8140 htlc.skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
8142 // We expect all skimmed fees to be consumed above
8143 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8145 if let Some(skimmed_fees) = holding_cell_skimmed_fees_opt {
8146 let mut iter = skimmed_fees.into_iter();
8147 for htlc in holding_cell_htlc_updates.iter_mut() {
8148 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut skimmed_fee_msat, .. } = htlc {
8149 *skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
8152 // We expect all skimmed fees to be consumed above
8153 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8155 if let Some(blinding_pts) = pending_outbound_blinding_points_opt {
8156 let mut iter = blinding_pts.into_iter();
8157 for htlc in pending_outbound_htlcs.iter_mut() {
8158 htlc.blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
8160 // We expect all blinding points to be consumed above
8161 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8163 if let Some(blinding_pts) = holding_cell_blinding_points_opt {
8164 let mut iter = blinding_pts.into_iter();
8165 for htlc in holding_cell_htlc_updates.iter_mut() {
8166 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut blinding_point, .. } = htlc {
8167 *blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
8170 // We expect all blinding points to be consumed above
8171 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8174 if let Some(malformed_htlcs) = malformed_htlcs {
8175 for (malformed_htlc_id, failure_code, sha256_of_onion) in malformed_htlcs {
8176 let htlc_idx = holding_cell_htlc_updates.iter().position(|htlc| {
8177 if let HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet } = htlc {
8178 let matches = *htlc_id == malformed_htlc_id;
8179 if matches { debug_assert!(err_packet.data.is_empty()) }
8182 }).ok_or(DecodeError::InvalidValue)?;
8183 let malformed_htlc = HTLCUpdateAwaitingACK::FailMalformedHTLC {
8184 htlc_id: malformed_htlc_id, failure_code, sha256_of_onion
8186 let _ = core::mem::replace(&mut holding_cell_htlc_updates[htlc_idx], malformed_htlc);
8191 context: ChannelContext {
8194 config: config.unwrap(),
8198 // Note that we don't care about serializing handshake limits as we only ever serialize
8199 // channel data after the handshake has completed.
8200 inbound_handshake_limits_override: None,
8203 temporary_channel_id,
8205 announcement_sigs_state: announcement_sigs_state.unwrap(),
8207 channel_value_satoshis,
8209 latest_monitor_update_id,
8211 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
8212 shutdown_scriptpubkey,
8215 cur_holder_commitment_transaction_number,
8216 cur_counterparty_commitment_transaction_number,
8219 holder_max_accepted_htlcs,
8220 pending_inbound_htlcs,
8221 pending_outbound_htlcs,
8222 holding_cell_htlc_updates,
8226 monitor_pending_channel_ready,
8227 monitor_pending_revoke_and_ack,
8228 monitor_pending_commitment_signed,
8229 monitor_pending_forwards,
8230 monitor_pending_failures,
8231 monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
8233 signer_pending_commitment_update: false,
8234 signer_pending_funding: false,
8237 holding_cell_update_fee,
8238 next_holder_htlc_id,
8239 next_counterparty_htlc_id,
8240 update_time_counter,
8243 #[cfg(debug_assertions)]
8244 holder_max_commitment_tx_output: Mutex::new((0, 0)),
8245 #[cfg(debug_assertions)]
8246 counterparty_max_commitment_tx_output: Mutex::new((0, 0)),
8248 last_sent_closing_fee: None,
8249 pending_counterparty_closing_signed: None,
8250 expecting_peer_commitment_signed: false,
8251 closing_fee_limits: None,
8252 target_closing_feerate_sats_per_kw,
8254 funding_tx_confirmed_in,
8255 funding_tx_confirmation_height,
8257 channel_creation_height: channel_creation_height.unwrap(),
8259 counterparty_dust_limit_satoshis,
8260 holder_dust_limit_satoshis,
8261 counterparty_max_htlc_value_in_flight_msat,
8262 holder_max_htlc_value_in_flight_msat: holder_max_htlc_value_in_flight_msat.unwrap(),
8263 counterparty_selected_channel_reserve_satoshis,
8264 holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis.unwrap(),
8265 counterparty_htlc_minimum_msat,
8266 holder_htlc_minimum_msat,
8267 counterparty_max_accepted_htlcs,
8270 counterparty_forwarding_info,
8272 channel_transaction_parameters: channel_parameters,
8273 funding_transaction,
8276 counterparty_cur_commitment_point,
8277 counterparty_prev_commitment_point,
8278 counterparty_node_id,
8280 counterparty_shutdown_scriptpubkey,
8284 channel_update_status,
8285 closing_signed_in_flight: false,
8289 #[cfg(any(test, fuzzing))]
8290 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
8291 #[cfg(any(test, fuzzing))]
8292 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
8294 workaround_lnd_bug_4006: None,
8295 sent_message_awaiting_response: None,
8297 latest_inbound_scid_alias,
8298 // Later in the ChannelManager deserialization phase we scan for channels and assign scid aliases if its missing
8299 outbound_scid_alias: outbound_scid_alias.unwrap_or(0),
8301 channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true),
8302 channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true),
8304 #[cfg(any(test, fuzzing))]
8305 historical_inbound_htlc_fulfills,
8307 channel_type: channel_type.unwrap(),
8310 blocked_monitor_updates: blocked_monitor_updates.unwrap(),
8319 use bitcoin::blockdata::constants::ChainHash;
8320 use bitcoin::blockdata::script::{ScriptBuf, Builder};
8321 use bitcoin::blockdata::transaction::{Transaction, TxOut};
8322 use bitcoin::blockdata::opcodes;
8323 use bitcoin::network::constants::Network;
8324 use crate::ln::onion_utils::INVALID_ONION_BLINDING;
8325 use crate::ln::{PaymentHash, PaymentPreimage};
8326 use crate::ln::channel_keys::{RevocationKey, RevocationBasepoint};
8327 use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
8328 use crate::ln::channel::InitFeatures;
8329 use crate::ln::channel::{AwaitingChannelReadyFlags, Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, HTLCUpdateAwaitingACK, commit_tx_fee_msat};
8330 use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
8331 use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
8332 use crate::ln::msgs;
8333 use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
8334 use crate::ln::script::ShutdownScript;
8335 use crate::ln::chan_utils::{self, htlc_success_tx_weight, htlc_timeout_tx_weight};
8336 use crate::chain::BestBlock;
8337 use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
8338 use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
8339 use crate::chain::transaction::OutPoint;
8340 use crate::routing::router::{Path, RouteHop};
8341 use crate::util::config::UserConfig;
8342 use crate::util::errors::APIError;
8343 use crate::util::ser::{ReadableArgs, Writeable};
8344 use crate::util::test_utils;
8345 use crate::util::test_utils::{OnGetShutdownScriptpubkey, TestKeysInterface};
8346 use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
8347 use bitcoin::secp256k1::ffi::Signature as FFISignature;
8348 use bitcoin::secp256k1::{SecretKey,PublicKey};
8349 use bitcoin::hashes::sha256::Hash as Sha256;
8350 use bitcoin::hashes::Hash;
8351 use bitcoin::hashes::hex::FromHex;
8352 use bitcoin::hash_types::WPubkeyHash;
8353 use bitcoin::blockdata::locktime::absolute::LockTime;
8354 use bitcoin::address::{WitnessProgram, WitnessVersion};
8355 use crate::prelude::*;
8357 struct TestFeeEstimator {
8360 impl FeeEstimator for TestFeeEstimator {
8361 fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 {
8367 fn test_max_funding_satoshis_no_wumbo() {
8368 assert_eq!(TOTAL_BITCOIN_SUPPLY_SATOSHIS, 21_000_000 * 100_000_000);
8369 assert!(MAX_FUNDING_SATOSHIS_NO_WUMBO <= TOTAL_BITCOIN_SUPPLY_SATOSHIS,
8370 "MAX_FUNDING_SATOSHIS_NO_WUMBO is greater than all satoshis in existence");
8374 signer: InMemorySigner,
8377 impl EntropySource for Keys {
8378 fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }
8381 impl SignerProvider for Keys {
8382 type EcdsaSigner = InMemorySigner;
8384 type TaprootSigner = InMemorySigner;
8386 fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
8387 self.signer.channel_keys_id()
8390 fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::EcdsaSigner {
8394 fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::EcdsaSigner, DecodeError> { panic!(); }
8396 fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()> {
8397 let secp_ctx = Secp256k1::signing_only();
8398 let channel_monitor_claim_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
8399 let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
8400 Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(channel_monitor_claim_key_hash).into_script())
8403 fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
8404 let secp_ctx = Secp256k1::signing_only();
8405 let channel_close_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
8406 Ok(ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key)))
8410 #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
8411 fn public_from_secret_hex(secp_ctx: &Secp256k1<bitcoin::secp256k1::All>, hex: &str) -> PublicKey {
8412 PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&<Vec<u8>>::from_hex(hex).unwrap()[..]).unwrap())
8416 fn upfront_shutdown_script_incompatibility() {
8417 let features = channelmanager::provided_init_features(&UserConfig::default()).clear_shutdown_anysegwit();
8418 let non_v0_segwit_shutdown_script = ShutdownScript::new_witness_program(
8419 &WitnessProgram::new(WitnessVersion::V16, &[0, 40]).unwrap(),
8422 let seed = [42; 32];
8423 let network = Network::Testnet;
8424 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8425 keys_provider.expect(OnGetShutdownScriptpubkey {
8426 returns: non_v0_segwit_shutdown_script.clone(),
8429 let secp_ctx = Secp256k1::new();
8430 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8431 let config = UserConfig::default();
8432 match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42, None) {
8433 Err(APIError::IncompatibleShutdownScript { script }) => {
8434 assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
8436 Err(e) => panic!("Unexpected error: {:?}", e),
8437 Ok(_) => panic!("Expected error"),
8441 // Check that, during channel creation, we use the same feerate in the open channel message
8442 // as we do in the Channel object creation itself.
8444 fn test_open_channel_msg_fee() {
8445 let original_fee = 253;
8446 let mut fee_est = TestFeeEstimator{fee_est: original_fee };
8447 let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
8448 let secp_ctx = Secp256k1::new();
8449 let seed = [42; 32];
8450 let network = Network::Testnet;
8451 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8453 let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8454 let config = UserConfig::default();
8455 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8457 // Now change the fee so we can check that the fee in the open_channel message is the
8458 // same as the old fee.
8459 fee_est.fee_est = 500;
8460 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8461 assert_eq!(open_channel_msg.feerate_per_kw, original_fee);
8465 fn test_holder_vs_counterparty_dust_limit() {
8466 // Test that when calculating the local and remote commitment transaction fees, the correct
8467 // dust limits are used.
8468 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8469 let secp_ctx = Secp256k1::new();
8470 let seed = [42; 32];
8471 let network = Network::Testnet;
8472 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8473 let logger = test_utils::TestLogger::new();
8474 let best_block = BestBlock::from_network(network);
8476 // Go through the flow of opening a channel between two nodes, making sure
8477 // they have different dust limits.
8479 // Create Node A's channel pointing to Node B's pubkey
8480 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8481 let config = UserConfig::default();
8482 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8484 // Create Node B's channel by receiving Node A's open_channel message
8485 // Make sure A's dust limit is as we expect.
8486 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8487 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8488 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8490 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
8491 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
8492 accept_channel_msg.dust_limit_satoshis = 546;
8493 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8494 node_a_chan.context.holder_dust_limit_satoshis = 1560;
8496 // Node A --> Node B: funding created
8497 let output_script = node_a_chan.context.get_funding_redeemscript();
8498 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8499 value: 10000000, script_pubkey: output_script.clone(),
8501 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8502 let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8503 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8505 // Node B --> Node A: funding signed
8506 let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
8507 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
8509 // Put some inbound and outbound HTLCs in A's channel.
8510 let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
8511 node_a_chan.context.pending_inbound_htlcs.push(InboundHTLCOutput {
8513 amount_msat: htlc_amount_msat,
8514 payment_hash: PaymentHash(Sha256::hash(&[42; 32]).to_byte_array()),
8515 cltv_expiry: 300000000,
8516 state: InboundHTLCState::Committed,
8519 node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
8521 amount_msat: htlc_amount_msat, // put an amount below A's dust amount but above B's.
8522 payment_hash: PaymentHash(Sha256::hash(&[43; 32]).to_byte_array()),
8523 cltv_expiry: 200000000,
8524 state: OutboundHTLCState::Committed,
8525 source: HTLCSource::OutboundRoute {
8526 path: Path { hops: Vec::new(), blinded_tail: None },
8527 session_priv: SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
8528 first_hop_htlc_msat: 548,
8529 payment_id: PaymentId([42; 32]),
8531 skimmed_fee_msat: None,
8532 blinding_point: None,
8535 // Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass
8536 // the dust limit check.
8537 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
8538 let local_commit_tx_fee = node_a_chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8539 let local_commit_fee_0_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.get_channel_type());
8540 assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs);
8542 // Finally, make sure that when Node A calculates the remote's commitment transaction fees, all
8543 // of the HTLCs are seen to be above the dust limit.
8544 node_a_chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
8545 let remote_commit_fee_3_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.get_channel_type());
8546 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
8547 let remote_commit_tx_fee = node_a_chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8548 assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs);
8552 fn test_timeout_vs_success_htlc_dust_limit() {
8553 // Make sure that when `next_remote_commit_tx_fee_msat` and `next_local_commit_tx_fee_msat`
8554 // calculate the real dust limits for HTLCs (i.e. the dust limit given by the counterparty
8555 // *plus* the fees paid for the HTLC) they don't swap `HTLC_SUCCESS_TX_WEIGHT` for
8556 // `HTLC_TIMEOUT_TX_WEIGHT`, and vice versa.
8557 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 253 });
8558 let secp_ctx = Secp256k1::new();
8559 let seed = [42; 32];
8560 let network = Network::Testnet;
8561 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8563 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8564 let config = UserConfig::default();
8565 let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8567 let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.get_channel_type());
8568 let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.get_channel_type());
8570 // If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped: then this HTLC would be
8571 // counted as dust when it shouldn't be.
8572 let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000;
8573 let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
8574 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8575 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
8577 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
8578 let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000;
8579 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered);
8580 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8581 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
8583 chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
8585 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
8586 let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000;
8587 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
8588 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8589 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
8591 // If swapped: this HTLC would be counted as dust when it shouldn't be.
8592 let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000;
8593 let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered);
8594 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8595 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
8599 fn channel_reestablish_no_updates() {
8600 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8601 let logger = test_utils::TestLogger::new();
8602 let secp_ctx = Secp256k1::new();
8603 let seed = [42; 32];
8604 let network = Network::Testnet;
8605 let best_block = BestBlock::from_network(network);
8606 let chain_hash = ChainHash::using_genesis_block(network);
8607 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8609 // Go through the flow of opening a channel between two nodes.
8611 // Create Node A's channel pointing to Node B's pubkey
8612 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8613 let config = UserConfig::default();
8614 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8616 // Create Node B's channel by receiving Node A's open_channel message
8617 let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
8618 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8619 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8621 // Node B --> Node A: accept channel
8622 let accept_channel_msg = node_b_chan.accept_inbound_channel();
8623 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8625 // Node A --> Node B: funding created
8626 let output_script = node_a_chan.context.get_funding_redeemscript();
8627 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8628 value: 10000000, script_pubkey: output_script.clone(),
8630 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8631 let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8632 let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8634 // Node B --> Node A: funding signed
8635 let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
8636 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
8638 // Now disconnect the two nodes and check that the commitment point in
8639 // Node B's channel_reestablish message is sane.
8640 assert!(node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
8641 let msg = node_b_chan.get_channel_reestablish(&&logger);
8642 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
8643 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
8644 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
8646 // Check that the commitment point in Node A's channel_reestablish message
8648 assert!(node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
8649 let msg = node_a_chan.get_channel_reestablish(&&logger);
8650 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
8651 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
8652 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
8656 fn test_configured_holder_max_htlc_value_in_flight() {
8657 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8658 let logger = test_utils::TestLogger::new();
8659 let secp_ctx = Secp256k1::new();
8660 let seed = [42; 32];
8661 let network = Network::Testnet;
8662 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8663 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8664 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8666 let mut config_2_percent = UserConfig::default();
8667 config_2_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 2;
8668 let mut config_99_percent = UserConfig::default();
8669 config_99_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 99;
8670 let mut config_0_percent = UserConfig::default();
8671 config_0_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 0;
8672 let mut config_101_percent = UserConfig::default();
8673 config_101_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 101;
8675 // Test that `OutboundV1Channel::new` creates a channel with the correct value for
8676 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
8677 // which is set to the lower bound + 1 (2%) of the `channel_value`.
8678 let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42, None).unwrap();
8679 let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000;
8680 assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64);
8682 // Test with the upper bound - 1 of valid values (99%).
8683 let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42, None).unwrap();
8684 let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000;
8685 assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);
8687 let chan_1_open_channel_msg = chan_1.get_open_channel(ChainHash::using_genesis_block(network));
8689 // Test that `InboundV1Channel::new` creates a channel with the correct value for
8690 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
8691 // which is set to the lower bound - 1 (2%) of the `channel_value`.
8692 let chan_3 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8693 let chan_3_value_msat = chan_3.context.channel_value_satoshis * 1000;
8694 assert_eq!(chan_3.context.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64);
8696 // Test with the upper bound - 1 of valid values (99%).
8697 let chan_4 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8698 let chan_4_value_msat = chan_4.context.channel_value_satoshis * 1000;
8699 assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64);
8701 // Test that `OutboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
8702 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
8703 let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42, None).unwrap();
8704 let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000;
8705 assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64);
8707 // Test that `OutboundV1Channel::new` uses the upper bound of the configurable percentage values
8708 // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
8710 let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42, None).unwrap();
8711 let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000;
8712 assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);
8714 // Test that `InboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
8715 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
8716 let chan_7 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8717 let chan_7_value_msat = chan_7.context.channel_value_satoshis * 1000;
8718 assert_eq!(chan_7.context.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64);
8720 // Test that `InboundV1Channel::new` uses the upper bound of the configurable percentage values
8721 // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
8723 let chan_8 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8724 let chan_8_value_msat = chan_8.context.channel_value_satoshis * 1000;
8725 assert_eq!(chan_8.context.holder_max_htlc_value_in_flight_msat, chan_8_value_msat);
8729 fn test_configured_holder_selected_channel_reserve_satoshis() {
8731 // Test that `OutboundV1Channel::new` and `InboundV1Channel::new` create a channel with the correct
8732 // channel reserves, when `their_channel_reserve_proportional_millionths` is configured.
8733 test_self_and_counterparty_channel_reserve(10_000_000, 0.02, 0.02);
8735 // Test with valid but unreasonably high channel reserves
8736 // Requesting and accepting parties have requested for 49%-49% and 60%-30% channel reserve
8737 test_self_and_counterparty_channel_reserve(10_000_000, 0.49, 0.49);
8738 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.30);
8740 // Test with calculated channel reserve less than lower bound
8741 // i.e `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
8742 test_self_and_counterparty_channel_reserve(100_000, 0.00002, 0.30);
8744 // Test with invalid channel reserves since sum of both is greater than or equal
8746 test_self_and_counterparty_channel_reserve(10_000_000, 0.50, 0.50);
8747 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.50);
8750 fn test_self_and_counterparty_channel_reserve(channel_value_satoshis: u64, outbound_selected_channel_reserve_perc: f64, inbound_selected_channel_reserve_perc: f64) {
8751 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15_000 });
8752 let logger = test_utils::TestLogger::new();
8753 let secp_ctx = Secp256k1::new();
8754 let seed = [42; 32];
8755 let network = Network::Testnet;
8756 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8757 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8758 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8761 let mut outbound_node_config = UserConfig::default();
8762 outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
8763 let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42, None).unwrap();
8765 let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
8766 assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);
8768 let chan_open_channel_msg = chan.get_open_channel(ChainHash::using_genesis_block(network));
8769 let mut inbound_node_config = UserConfig::default();
8770 inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
8772 if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 {
8773 let chan_inbound_node = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false).unwrap();
8775 let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64);
8777 assert_eq!(chan_inbound_node.context.holder_selected_channel_reserve_satoshis, expected_inbound_selected_chan_reserve);
8778 assert_eq!(chan_inbound_node.context.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve);
8780 // Channel Negotiations failed
8781 let result = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false);
8782 assert!(result.is_err());
8787 fn channel_update() {
8788 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8789 let logger = test_utils::TestLogger::new();
8790 let secp_ctx = Secp256k1::new();
8791 let seed = [42; 32];
8792 let network = Network::Testnet;
8793 let best_block = BestBlock::from_network(network);
8794 let chain_hash = ChainHash::using_genesis_block(network);
8795 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8797 // Create Node A's channel pointing to Node B's pubkey
8798 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8799 let config = UserConfig::default();
8800 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8802 // Create Node B's channel by receiving Node A's open_channel message
8803 // Make sure A's dust limit is as we expect.
8804 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8805 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8806 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8808 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
8809 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
8810 accept_channel_msg.dust_limit_satoshis = 546;
8811 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8812 node_a_chan.context.holder_dust_limit_satoshis = 1560;
8814 // Node A --> Node B: funding created
8815 let output_script = node_a_chan.context.get_funding_redeemscript();
8816 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8817 value: 10000000, script_pubkey: output_script.clone(),
8819 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8820 let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8821 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8823 // Node B --> Node A: funding signed
8824 let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
8825 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
8827 // Make sure that receiving a channel update will update the Channel as expected.
8828 let update = ChannelUpdate {
8829 contents: UnsignedChannelUpdate {
8831 short_channel_id: 0,
8834 cltv_expiry_delta: 100,
8835 htlc_minimum_msat: 5,
8836 htlc_maximum_msat: MAX_VALUE_MSAT,
8838 fee_proportional_millionths: 11,
8839 excess_data: Vec::new(),
8841 signature: Signature::from(unsafe { FFISignature::new() })
8843 assert!(node_a_chan.channel_update(&update).unwrap());
8845 // The counterparty can send an update with a higher minimum HTLC, but that shouldn't
8846 // change our official htlc_minimum_msat.
8847 assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1);
8848 match node_a_chan.context.counterparty_forwarding_info() {
8850 assert_eq!(info.cltv_expiry_delta, 100);
8851 assert_eq!(info.fee_base_msat, 110);
8852 assert_eq!(info.fee_proportional_millionths, 11);
8854 None => panic!("expected counterparty forwarding info to be Some")
8857 assert!(!node_a_chan.channel_update(&update).unwrap());
8861 fn blinding_point_skimmed_fee_malformed_ser() {
8862 // Ensure that channel blinding points, skimmed fees, and malformed HTLCs are (de)serialized
8864 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8865 let secp_ctx = Secp256k1::new();
8866 let seed = [42; 32];
8867 let network = Network::Testnet;
8868 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8870 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8871 let config = UserConfig::default();
8872 let features = channelmanager::provided_init_features(&config);
8873 let outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8874 let mut chan = Channel { context: outbound_chan.context };
8876 let dummy_htlc_source = HTLCSource::OutboundRoute {
8878 hops: vec![RouteHop {
8879 pubkey: test_utils::pubkey(2), channel_features: ChannelFeatures::empty(),
8880 node_features: NodeFeatures::empty(), short_channel_id: 0, fee_msat: 0,
8881 cltv_expiry_delta: 0, maybe_announced_channel: false,
8885 session_priv: test_utils::privkey(42),
8886 first_hop_htlc_msat: 0,
8887 payment_id: PaymentId([42; 32]),
8889 let dummy_outbound_output = OutboundHTLCOutput {
8892 payment_hash: PaymentHash([43; 32]),
8894 state: OutboundHTLCState::Committed,
8895 source: dummy_htlc_source.clone(),
8896 skimmed_fee_msat: None,
8897 blinding_point: None,
8899 let mut pending_outbound_htlcs = vec![dummy_outbound_output.clone(); 10];
8900 for (idx, htlc) in pending_outbound_htlcs.iter_mut().enumerate() {
8902 htlc.blinding_point = Some(test_utils::pubkey(42 + idx as u8));
8905 htlc.skimmed_fee_msat = Some(1);
8908 chan.context.pending_outbound_htlcs = pending_outbound_htlcs.clone();
8910 let dummy_holding_cell_add_htlc = HTLCUpdateAwaitingACK::AddHTLC {
8913 payment_hash: PaymentHash([43; 32]),
8914 source: dummy_htlc_source.clone(),
8915 onion_routing_packet: msgs::OnionPacket {
8917 public_key: Ok(test_utils::pubkey(1)),
8918 hop_data: [0; 20*65],
8921 skimmed_fee_msat: None,
8922 blinding_point: None,
8924 let dummy_holding_cell_claim_htlc = HTLCUpdateAwaitingACK::ClaimHTLC {
8925 payment_preimage: PaymentPreimage([42; 32]),
8928 let dummy_holding_cell_failed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailHTLC {
8929 htlc_id, err_packet: msgs::OnionErrorPacket { data: vec![42] }
8931 let dummy_holding_cell_malformed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailMalformedHTLC {
8932 htlc_id, failure_code: INVALID_ONION_BLINDING, sha256_of_onion: [0; 32],
8934 let mut holding_cell_htlc_updates = Vec::with_capacity(12);
8937 holding_cell_htlc_updates.push(dummy_holding_cell_add_htlc.clone());
8938 } else if i % 5 == 1 {
8939 holding_cell_htlc_updates.push(dummy_holding_cell_claim_htlc.clone());
8940 } else if i % 5 == 2 {
8941 let mut dummy_add = dummy_holding_cell_add_htlc.clone();
8942 if let HTLCUpdateAwaitingACK::AddHTLC {
8943 ref mut blinding_point, ref mut skimmed_fee_msat, ..
8944 } = &mut dummy_add {
8945 *blinding_point = Some(test_utils::pubkey(42 + i));
8946 *skimmed_fee_msat = Some(42);
8948 holding_cell_htlc_updates.push(dummy_add);
8949 } else if i % 5 == 3 {
8950 holding_cell_htlc_updates.push(dummy_holding_cell_malformed_htlc(i as u64));
8952 holding_cell_htlc_updates.push(dummy_holding_cell_failed_htlc(i as u64));
8955 chan.context.holding_cell_htlc_updates = holding_cell_htlc_updates.clone();
8957 // Encode and decode the channel and ensure that the HTLCs within are the same.
8958 let encoded_chan = chan.encode();
8959 let mut s = crate::io::Cursor::new(&encoded_chan);
8960 let mut reader = crate::util::ser::FixedLengthReader::new(&mut s, encoded_chan.len() as u64);
8961 let features = channelmanager::provided_channel_type_features(&config);
8962 let decoded_chan = Channel::read(&mut reader, (&&keys_provider, &&keys_provider, 0, &features)).unwrap();
8963 assert_eq!(decoded_chan.context.pending_outbound_htlcs, pending_outbound_htlcs);
8964 assert_eq!(decoded_chan.context.holding_cell_htlc_updates, holding_cell_htlc_updates);
8967 #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
8969 fn outbound_commitment_test() {
8970 use bitcoin::sighash;
8971 use bitcoin::consensus::encode::serialize;
8972 use bitcoin::sighash::EcdsaSighashType;
8973 use bitcoin::hashes::hex::FromHex;
8974 use bitcoin::hash_types::Txid;
8975 use bitcoin::secp256k1::Message;
8976 use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, ecdsa::EcdsaChannelSigner};
8977 use crate::ln::PaymentPreimage;
8978 use crate::ln::channel::{HTLCOutputInCommitment ,TxCreationKeys};
8979 use crate::ln::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint};
8980 use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
8981 use crate::util::logger::Logger;
8982 use crate::sync::Arc;
8983 use core::str::FromStr;
8984 use hex::DisplayHex;
8986 // Test vectors from BOLT 3 Appendices C and F (anchors):
8987 let feeest = TestFeeEstimator{fee_est: 15000};
8988 let logger : Arc<dyn Logger> = Arc::new(test_utils::TestLogger::new());
8989 let secp_ctx = Secp256k1::new();
8991 let mut signer = InMemorySigner::new(
8993 SecretKey::from_slice(&<Vec<u8>>::from_hex("30ff4956bbdd3222d44cc5e8a1261dab1e07957bdac5ae88fe3261ef321f3749").unwrap()[..]).unwrap(),
8994 SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
8995 SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
8996 SecretKey::from_slice(&<Vec<u8>>::from_hex("3333333333333333333333333333333333333333333333333333333333333333").unwrap()[..]).unwrap(),
8997 SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
8999 // These aren't set in the test vectors:
9000 [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff],
9006 assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..],
9007 <Vec<u8>>::from_hex("023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb").unwrap()[..]);
9008 let keys_provider = Keys { signer: signer.clone() };
9010 let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9011 let mut config = UserConfig::default();
9012 config.channel_handshake_config.announced_channel = false;
9013 let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42, None).unwrap(); // Nothing uses their network key in this test
9014 chan.context.holder_dust_limit_satoshis = 546;
9015 chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in in accept_channel
9017 let funding_info = OutPoint{ txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
9019 let counterparty_pubkeys = ChannelPublicKeys {
9020 funding_pubkey: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
9021 revocation_basepoint: RevocationBasepoint::from(PublicKey::from_slice(&<Vec<u8>>::from_hex("02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27").unwrap()[..]).unwrap()),
9022 payment_point: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"),
9023 delayed_payment_basepoint: DelayedPaymentBasepoint::from(public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13")),
9024 htlc_basepoint: HtlcBasepoint::from(public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"))
9026 chan.context.channel_transaction_parameters.counterparty_parameters = Some(
9027 CounterpartyChannelTransactionParameters {
9028 pubkeys: counterparty_pubkeys.clone(),
9029 selected_contest_delay: 144
9031 chan.context.channel_transaction_parameters.funding_outpoint = Some(funding_info);
9032 signer.provide_channel_parameters(&chan.context.channel_transaction_parameters);
9034 assert_eq!(counterparty_pubkeys.payment_point.serialize()[..],
9035 <Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
9037 assert_eq!(counterparty_pubkeys.funding_pubkey.serialize()[..],
9038 <Vec<u8>>::from_hex("030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c1").unwrap()[..]);
9040 assert_eq!(counterparty_pubkeys.htlc_basepoint.to_public_key().serialize()[..],
9041 <Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
9043 // We can't just use build_holder_transaction_keys here as the per_commitment_secret is not
9044 // derived from a commitment_seed, so instead we copy it here and call
9045 // build_commitment_transaction.
9046 let delayed_payment_base = &chan.context.holder_signer.as_ref().pubkeys().delayed_payment_basepoint;
9047 let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
9048 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
9049 let htlc_basepoint = &chan.context.holder_signer.as_ref().pubkeys().htlc_basepoint;
9050 let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint);
9052 macro_rules! test_commitment {
9053 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
9054 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::only_static_remote_key();
9055 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::only_static_remote_key(), $($remain)*);
9059 macro_rules! test_commitment_with_anchors {
9060 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
9061 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9062 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), $($remain)*);
9066 macro_rules! test_commitment_common {
9067 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $opt_anchors: expr, {
9068 $( { $htlc_idx: expr, $counterparty_htlc_sig_hex: expr, $htlc_sig_hex: expr, $htlc_tx_hex: expr } ), *
9070 let (commitment_tx, htlcs): (_, Vec<HTLCOutputInCommitment>) = {
9071 let mut commitment_stats = chan.context.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, &logger);
9073 let htlcs = commitment_stats.htlcs_included.drain(..)
9074 .filter_map(|(htlc, _)| if htlc.transaction_output_index.is_some() { Some(htlc) } else { None })
9076 (commitment_stats.tx, htlcs)
9078 let trusted_tx = commitment_tx.trust();
9079 let unsigned_tx = trusted_tx.built_transaction();
9080 let redeemscript = chan.context.get_funding_redeemscript();
9081 let counterparty_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_sig_hex).unwrap()[..]).unwrap();
9082 let sighash = unsigned_tx.get_sighash_all(&redeemscript, chan.context.channel_value_satoshis);
9083 log_trace!(logger, "unsigned_tx = {}", serialize(&unsigned_tx.transaction).as_hex());
9084 assert!(secp_ctx.verify_ecdsa(&sighash, &counterparty_signature, chan.context.counterparty_funding_pubkey()).is_ok(), "verify counterparty commitment sig");
9086 let mut per_htlc: Vec<(HTLCOutputInCommitment, Option<Signature>)> = Vec::new();
9087 per_htlc.clear(); // Don't warn about excess mut for no-HTLC calls
9088 let mut counterparty_htlc_sigs = Vec::new();
9089 counterparty_htlc_sigs.clear(); // Don't warn about excess mut for no-HTLC calls
9091 let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
9092 per_htlc.push((htlcs[$htlc_idx].clone(), Some(remote_signature)));
9093 counterparty_htlc_sigs.push(remote_signature);
9095 assert_eq!(htlcs.len(), per_htlc.len());
9097 let holder_commitment_tx = HolderCommitmentTransaction::new(
9098 commitment_tx.clone(),
9099 counterparty_signature,
9100 counterparty_htlc_sigs,
9101 &chan.context.holder_signer.as_ref().pubkeys().funding_pubkey,
9102 chan.context.counterparty_funding_pubkey()
9104 let holder_sig = signer.sign_holder_commitment(&holder_commitment_tx, &secp_ctx).unwrap();
9105 assert_eq!(Signature::from_der(&<Vec<u8>>::from_hex($sig_hex).unwrap()[..]).unwrap(), holder_sig, "holder_sig");
9107 let funding_redeemscript = chan.context.get_funding_redeemscript();
9108 let tx = holder_commitment_tx.add_holder_sig(&funding_redeemscript, holder_sig);
9109 assert_eq!(serialize(&tx)[..], <Vec<u8>>::from_hex($tx_hex).unwrap()[..], "tx");
9111 // ((htlc, counterparty_sig), (index, holder_sig))
9112 let mut htlc_counterparty_sig_iter = holder_commitment_tx.counterparty_htlc_sigs.iter();
9115 log_trace!(logger, "verifying htlc {}", $htlc_idx);
9116 let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
9118 let ref htlc = htlcs[$htlc_idx];
9119 let mut htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw,
9120 chan.context.get_counterparty_selected_contest_delay().unwrap(),
9121 &htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
9122 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
9123 let htlc_sighashtype = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
9124 let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
9125 assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key.to_public_key()).is_ok(), "verify counterparty htlc sig");
9127 let mut preimage: Option<PaymentPreimage> = None;
9130 let out = PaymentHash(Sha256::hash(&[i; 32]).to_byte_array());
9131 if out == htlc.payment_hash {
9132 preimage = Some(PaymentPreimage([i; 32]));
9136 assert!(preimage.is_some());
9139 let htlc_counterparty_sig = htlc_counterparty_sig_iter.next().unwrap();
9140 let htlc_holder_sig = signer.sign_holder_htlc_transaction(&htlc_tx, 0, &HTLCDescriptor {
9141 channel_derivation_parameters: ChannelDerivationParameters {
9142 value_satoshis: chan.context.channel_value_satoshis,
9143 keys_id: chan.context.channel_keys_id,
9144 transaction_parameters: chan.context.channel_transaction_parameters.clone(),
9146 commitment_txid: trusted_tx.txid(),
9147 per_commitment_number: trusted_tx.commitment_number(),
9148 per_commitment_point: trusted_tx.per_commitment_point(),
9149 feerate_per_kw: trusted_tx.feerate_per_kw(),
9151 preimage: preimage.clone(),
9152 counterparty_sig: *htlc_counterparty_sig,
9153 }, &secp_ctx).unwrap();
9154 let num_anchors = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { 2 } else { 0 };
9155 assert_eq!(htlc.transaction_output_index, Some($htlc_idx + num_anchors), "output index");
9157 let signature = Signature::from_der(&<Vec<u8>>::from_hex($htlc_sig_hex).unwrap()[..]).unwrap();
9158 assert_eq!(signature, htlc_holder_sig, "htlc sig");
9159 let trusted_tx = holder_commitment_tx.trust();
9160 htlc_tx.input[0].witness = trusted_tx.build_htlc_input_witness($htlc_idx, htlc_counterparty_sig, &htlc_holder_sig, &preimage);
9161 log_trace!(logger, "htlc_tx = {}", serialize(&htlc_tx).as_hex());
9162 assert_eq!(serialize(&htlc_tx)[..], <Vec<u8>>::from_hex($htlc_tx_hex).unwrap()[..], "htlc tx");
9164 assert!(htlc_counterparty_sig_iter.next().is_none());
9168 // anchors: simple commitment tx with no HTLCs and single anchor
9169 test_commitment_with_anchors!("30440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a8658",
9170 "3044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778",
9171 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f10529800000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778014730440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a865801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9173 // simple commitment tx with no HTLCs
9174 chan.context.value_to_self_msat = 7000000000;
9176 test_commitment!("3045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b0",
9177 "30440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae055647142",
9178 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48454a56a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae05564714201483045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9180 // anchors: simple commitment tx with no HTLCs
9181 test_commitment_with_anchors!("3045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f3",
9182 "30450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f7",
9183 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a508b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f701483045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9185 chan.context.pending_inbound_htlcs.push({
9186 let mut out = InboundHTLCOutput{
9188 amount_msat: 1000000,
9190 payment_hash: PaymentHash([0; 32]),
9191 state: InboundHTLCState::Committed,
9193 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap()).to_byte_array();
9196 chan.context.pending_inbound_htlcs.push({
9197 let mut out = InboundHTLCOutput{
9199 amount_msat: 2000000,
9201 payment_hash: PaymentHash([0; 32]),
9202 state: InboundHTLCState::Committed,
9204 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
9207 chan.context.pending_outbound_htlcs.push({
9208 let mut out = OutboundHTLCOutput{
9210 amount_msat: 2000000,
9212 payment_hash: PaymentHash([0; 32]),
9213 state: OutboundHTLCState::Committed,
9214 source: HTLCSource::dummy(),
9215 skimmed_fee_msat: None,
9216 blinding_point: None,
9218 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).to_byte_array();
9221 chan.context.pending_outbound_htlcs.push({
9222 let mut out = OutboundHTLCOutput{
9224 amount_msat: 3000000,
9226 payment_hash: PaymentHash([0; 32]),
9227 state: OutboundHTLCState::Committed,
9228 source: HTLCSource::dummy(),
9229 skimmed_fee_msat: None,
9230 blinding_point: None,
9232 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).to_byte_array();
9235 chan.context.pending_inbound_htlcs.push({
9236 let mut out = InboundHTLCOutput{
9238 amount_msat: 4000000,
9240 payment_hash: PaymentHash([0; 32]),
9241 state: InboundHTLCState::Committed,
9243 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0404040404040404040404040404040404040404040404040404040404040404").unwrap()).to_byte_array();
9247 // commitment tx with all five HTLCs untrimmed (minimum feerate)
9248 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9249 chan.context.feerate_per_kw = 0;
9251 test_commitment!("3044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e5",
9252 "304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea",
9253 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e0a06a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea01473044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9256 "3045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b",
9257 "30440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce",
9258 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b00000000000000000001e8030000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b014730440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
9261 "30440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f896004",
9262 "3045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f",
9263 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b01000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f89600401483045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9266 "30440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d43352",
9267 "3045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa",
9268 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b02000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d4335201483045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9271 "304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c1363",
9272 "304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac7487",
9273 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b03000000000000000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c13630147304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac748701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9276 "3044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b87",
9277 "3045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95",
9278 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b04000000000000000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b8701483045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9281 // commitment tx with seven outputs untrimmed (maximum feerate)
9282 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9283 chan.context.feerate_per_kw = 647;
9285 test_commitment!("3045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee",
9286 "30450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb7",
9287 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e09c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb701483045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9290 "30450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f",
9291 "30440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b",
9292 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f014730440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
9295 "304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c",
9296 "30450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f",
9297 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0100000000000000000124060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c014830450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9300 "30440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c8",
9301 "3045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673",
9302 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe020000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c801483045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9305 "304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c64",
9306 "3045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee",
9307 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe030000000000000000010c0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c6401483045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9310 "30450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca",
9311 "3044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9",
9312 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe04000000000000000001da0d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca01473044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9315 // commitment tx with six outputs untrimmed (minimum feerate)
9316 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9317 chan.context.feerate_per_kw = 648;
9319 test_commitment!("304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e5",
9320 "3045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e9",
9321 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4844e9d6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e90147304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9324 "3045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e0",
9325 "304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec6",
9326 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10000000000000000000123060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e00148304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9329 "304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d384124",
9330 "3044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae",
9331 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10100000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d38412401473044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9334 "304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc625532989",
9335 "3045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e2260796",
9336 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf1020000000000000000010b0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc62553298901483045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e226079601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9339 "3044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be",
9340 "3045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6",
9341 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf103000000000000000001d90d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be01483045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9344 // anchors: commitment tx with six outputs untrimmed (minimum dust limit)
9345 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9346 chan.context.feerate_per_kw = 645;
9347 chan.context.holder_dust_limit_satoshis = 1001;
9349 test_commitment_with_anchors!("3044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc312",
9350 "3045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d0051",
9351 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80084a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837ead007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5eb80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994abc996a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d005101473044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc31201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9354 "3045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc282",
9355 "3045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef09",
9356 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320002000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc28283483045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef0901008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000" },
9359 "3045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e",
9360 "3045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac",
9361 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320003000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e83483045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
9364 "3044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c1",
9365 "304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e",
9366 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320004000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c18347304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
9369 "3044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc615",
9370 "3045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226",
9371 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320005000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc61583483045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
9374 // commitment tx with six outputs untrimmed (maximum feerate)
9375 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9376 chan.context.feerate_per_kw = 2069;
9377 chan.context.holder_dust_limit_satoshis = 546;
9379 test_commitment!("304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc",
9380 "3045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e33",
9381 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48477956a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e330148304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9384 "3045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f699",
9385 "3045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d",
9386 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0000000000000000000175020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f69901483045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9389 "3045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df",
9390 "3045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61",
9391 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0100000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df01483045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9394 "3045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e0",
9395 "3044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c18",
9396 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d020000000000000000015d060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e001473044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c1801008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9399 "304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df",
9400 "3045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33",
9401 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d03000000000000000001f2090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df01483045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9404 // commitment tx with five outputs untrimmed (minimum feerate)
9405 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9406 chan.context.feerate_per_kw = 2070;
9408 test_commitment!("304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c",
9409 "3044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c1",
9410 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484da966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c10147304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9413 "304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b",
9414 "30440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e5",
9415 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff0000000000000000000174020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b014730440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e501008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9418 "3045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546",
9419 "30450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c6",
9420 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff010000000000000000015c060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546014830450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9423 "3045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504",
9424 "30440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502",
9425 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff02000000000000000001f1090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504014730440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9428 // commitment tx with five outputs untrimmed (maximum feerate)
9429 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9430 chan.context.feerate_per_kw = 2194;
9432 test_commitment!("304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb3",
9433 "3044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d398",
9434 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48440966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d3980147304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9437 "3045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de8450",
9438 "304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e0154301",
9439 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de84500148304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e015430101008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9442 "3044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a0",
9443 "3045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b77",
9444 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd010000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a001483045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b7701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9447 "3045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b",
9448 "30450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3",
9449 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd020000000000000000019a090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b014830450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9452 // commitment tx with four outputs untrimmed (minimum feerate)
9453 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9454 chan.context.feerate_per_kw = 2195;
9456 test_commitment!("304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce403",
9457 "3044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d1767",
9458 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484b8976a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d17670147304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce40301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9461 "3045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e",
9462 "3045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d76",
9463 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900000000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e01483045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d7601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9466 "3045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a",
9467 "3044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82",
9468 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900100000000000000000199090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a01473044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9471 // anchors: commitment tx with four outputs untrimmed (minimum dust limit)
9472 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9473 chan.context.feerate_per_kw = 2185;
9474 chan.context.holder_dust_limit_satoshis = 2001;
9475 let cached_channel_type = chan.context.channel_type;
9476 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9478 test_commitment_with_anchors!("3044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c0",
9479 "3045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd5",
9480 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80064a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994b80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ac5916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd501473044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9483 "304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb",
9484 "30440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f",
9485 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc02000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb834730440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
9488 "3045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd7271",
9489 "3045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688",
9490 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc03000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd727183483045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
9493 // commitment tx with four outputs untrimmed (maximum feerate)
9494 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9495 chan.context.feerate_per_kw = 3702;
9496 chan.context.holder_dust_limit_satoshis = 546;
9497 chan.context.channel_type = cached_channel_type.clone();
9499 test_commitment!("304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee40169",
9500 "3045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf75",
9501 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4846f916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf750148304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee4016901475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9504 "304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb89",
9505 "304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f",
9506 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb890147304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9509 "3044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb5817",
9510 "304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc",
9511 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0100000000000000000176050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb58170147304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9514 // commitment tx with three outputs untrimmed (minimum feerate)
9515 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9516 chan.context.feerate_per_kw = 3703;
9518 test_commitment!("3045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e",
9519 "304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e1",
9520 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484eb936a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e101483045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9523 "3045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a",
9524 "3045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05",
9525 "0200000000010120060e4a29579d429f0f27c17ee5f1ee282f20d706d6f90b63d35946d8f3029a0000000000000000000175050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a01483045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9528 // anchors: commitment tx with three outputs untrimmed (minimum dust limit)
9529 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9530 chan.context.feerate_per_kw = 3687;
9531 chan.context.holder_dust_limit_satoshis = 3001;
9532 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9534 test_commitment_with_anchors!("3045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c228377",
9535 "3045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d",
9536 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80054a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aa28b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d01483045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c22837701475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9539 "3044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c",
9540 "3045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de",
9541 "02000000000101542562b326c08e3a076d9cfca2be175041366591da334d8d513ff1686fd95a6002000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c83483045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
9544 // commitment tx with three outputs untrimmed (maximum feerate)
9545 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9546 chan.context.feerate_per_kw = 4914;
9547 chan.context.holder_dust_limit_satoshis = 546;
9548 chan.context.channel_type = cached_channel_type.clone();
9550 test_commitment!("3045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c95244",
9551 "3045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c19",
9552 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484ae8f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c1901483045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c9524401475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9555 "3045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374",
9556 "30440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10",
9557 "02000000000101a9172908eace869cc35128c31fc2ab502f72e4dff31aab23e0244c4b04b11ab00000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374014730440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9560 // commitment tx with two outputs untrimmed (minimum feerate)
9561 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9562 chan.context.feerate_per_kw = 4915;
9563 chan.context.holder_dust_limit_satoshis = 546;
9565 test_commitment!("304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a720",
9566 "30450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf5",
9567 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484fa926a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf50147304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a72001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9569 // anchors: commitment tx with two outputs untrimmed (minimum dust limit)
9570 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9571 chan.context.feerate_per_kw = 4894;
9572 chan.context.holder_dust_limit_satoshis = 4001;
9573 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9575 test_commitment_with_anchors!("3045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b95",
9576 "30450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd",
9577 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ad0886a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd01483045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b9501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9579 // commitment tx with two outputs untrimmed (maximum feerate)
9580 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9581 chan.context.feerate_per_kw = 9651180;
9582 chan.context.holder_dust_limit_satoshis = 546;
9583 chan.context.channel_type = cached_channel_type.clone();
9585 test_commitment!("304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd3",
9586 "3045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de",
9587 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b800222020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80ec0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4840400483045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de0147304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9589 // commitment tx with one output untrimmed (minimum feerate)
9590 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9591 chan.context.feerate_per_kw = 9651181;
9593 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
9594 "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
9595 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9597 // anchors: commitment tx with one output untrimmed (minimum dust limit)
9598 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9599 chan.context.feerate_per_kw = 6216010;
9600 chan.context.holder_dust_limit_satoshis = 4001;
9601 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9603 test_commitment_with_anchors!("30450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf",
9604 "30450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1",
9605 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a04004830450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1014830450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9607 // commitment tx with fee greater than funder amount
9608 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9609 chan.context.feerate_per_kw = 9651936;
9610 chan.context.holder_dust_limit_satoshis = 546;
9611 chan.context.channel_type = cached_channel_type;
9613 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
9614 "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
9615 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9617 // commitment tx with 3 htlc outputs, 2 offered having the same amount and preimage
9618 chan.context.value_to_self_msat = 7_000_000_000 - 2_000_000;
9619 chan.context.feerate_per_kw = 253;
9620 chan.context.pending_inbound_htlcs.clear();
9621 chan.context.pending_inbound_htlcs.push({
9622 let mut out = InboundHTLCOutput{
9624 amount_msat: 2000000,
9626 payment_hash: PaymentHash([0; 32]),
9627 state: InboundHTLCState::Committed,
9629 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
9632 chan.context.pending_outbound_htlcs.clear();
9633 chan.context.pending_outbound_htlcs.push({
9634 let mut out = OutboundHTLCOutput{
9636 amount_msat: 5000001,
9638 payment_hash: PaymentHash([0; 32]),
9639 state: OutboundHTLCState::Committed,
9640 source: HTLCSource::dummy(),
9641 skimmed_fee_msat: None,
9642 blinding_point: None,
9644 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
9647 chan.context.pending_outbound_htlcs.push({
9648 let mut out = OutboundHTLCOutput{
9650 amount_msat: 5000000,
9652 payment_hash: PaymentHash([0; 32]),
9653 state: OutboundHTLCState::Committed,
9654 source: HTLCSource::dummy(),
9655 skimmed_fee_msat: None,
9656 blinding_point: None,
9658 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
9662 test_commitment!("304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c8",
9663 "304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c",
9664 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2d8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121b8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121bc0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484a69f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c0147304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9667 "3045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce5",
9668 "3044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b",
9669 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec000000000000000000011f070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce501473044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9671 "3045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a",
9672 "3045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d",
9673 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec01000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a01483045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868f9010000" },
9675 "30440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc",
9676 "304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb",
9677 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec02000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc0147304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" }
9680 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9681 test_commitment_with_anchors!("3044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c725",
9682 "3045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b76",
9683 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aad9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b7601473044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c72501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9686 "30440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c2",
9687 "304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424",
9688 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c402000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c28347304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
9690 "304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c",
9691 "304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b",
9692 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40300000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c8347304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568f9010000" },
9694 "3045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a1",
9695 "3045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b",
9696 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40400000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a183483045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568fa010000" }
9701 fn test_per_commitment_secret_gen() {
9702 // Test vectors from BOLT 3 Appendix D:
9704 let mut seed = [0; 32];
9705 seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap());
9706 assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
9707 <Vec<u8>>::from_hex("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap()[..]);
9709 seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap());
9710 assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
9711 <Vec<u8>>::from_hex("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()[..]);
9713 assert_eq!(chan_utils::build_commitment_secret(&seed, 0xaaaaaaaaaaa),
9714 <Vec<u8>>::from_hex("56f4008fb007ca9acf0e15b054d5c9fd12ee06cea347914ddbaed70d1c13a528").unwrap()[..]);
9716 assert_eq!(chan_utils::build_commitment_secret(&seed, 0x555555555555),
9717 <Vec<u8>>::from_hex("9015daaeb06dba4ccc05b91b2f73bd54405f2be9f217fbacd3c5ac2e62327d31").unwrap()[..]);
9719 seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap());
9720 assert_eq!(chan_utils::build_commitment_secret(&seed, 1),
9721 <Vec<u8>>::from_hex("915c75942a26bb3a433a8ce2cb0427c29ec6c1775cfc78328b57f6ba7bfeaa9c").unwrap()[..]);
9725 fn test_key_derivation() {
9726 // Test vectors from BOLT 3 Appendix E:
9727 let secp_ctx = Secp256k1::new();
9729 let base_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f").unwrap()[..]).unwrap();
9730 let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
9732 let base_point = PublicKey::from_secret_key(&secp_ctx, &base_secret);
9733 assert_eq!(base_point.serialize()[..], <Vec<u8>>::from_hex("036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2").unwrap()[..]);
9735 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
9736 assert_eq!(per_commitment_point.serialize()[..], <Vec<u8>>::from_hex("025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486").unwrap()[..]);
9738 assert_eq!(chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret),
9739 SecretKey::from_slice(&<Vec<u8>>::from_hex("cbced912d3b21bf196a766651e436aff192362621ce317704ea2f75d87e7be0f").unwrap()[..]).unwrap());
9741 assert_eq!(RevocationKey::from_basepoint(&secp_ctx, &RevocationBasepoint::from(base_point), &per_commitment_point).to_public_key().serialize()[..],
9742 <Vec<u8>>::from_hex("02916e326636d19c33f13e8c0c3a03dd157f332f3e99c317c141dd865eb01f8ff0").unwrap()[..]);
9744 assert_eq!(chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_secret, &base_secret),
9745 SecretKey::from_slice(&<Vec<u8>>::from_hex("d09ffff62ddb2297ab000cc85bcb4283fdeb6aa052affbc9dddcf33b61078110").unwrap()[..]).unwrap());
9749 fn test_zero_conf_channel_type_support() {
9750 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9751 let secp_ctx = Secp256k1::new();
9752 let seed = [42; 32];
9753 let network = Network::Testnet;
9754 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9755 let logger = test_utils::TestLogger::new();
9757 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9758 let config = UserConfig::default();
9759 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
9760 node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
9762 let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
9763 channel_type_features.set_zero_conf_required();
9765 let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
9766 open_channel_msg.channel_type = Some(channel_type_features);
9767 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9768 let res = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
9769 node_b_node_id, &channelmanager::provided_channel_type_features(&config),
9770 &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false);
9771 assert!(res.is_ok());
9775 fn test_supports_anchors_zero_htlc_tx_fee() {
9776 // Tests that if both sides support and negotiate `anchors_zero_fee_htlc_tx`, it is the
9777 // resulting `channel_type`.
9778 let secp_ctx = Secp256k1::new();
9779 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9780 let network = Network::Testnet;
9781 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
9782 let logger = test_utils::TestLogger::new();
9784 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
9785 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
9787 let mut config = UserConfig::default();
9788 config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
9790 // It is not enough for just the initiator to signal `option_anchors_zero_fee_htlc_tx`, both
9791 // need to signal it.
9792 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9793 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9794 &channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42,
9795 &config, 0, 42, None
9797 assert!(!channel_a.context.channel_type.supports_anchors_zero_fee_htlc_tx());
9799 let mut expected_channel_type = ChannelTypeFeatures::empty();
9800 expected_channel_type.set_static_remote_key_required();
9801 expected_channel_type.set_anchors_zero_fee_htlc_tx_required();
9803 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9804 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9805 &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
9809 let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9810 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
9811 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9812 &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
9813 &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9816 assert_eq!(channel_a.context.channel_type, expected_channel_type);
9817 assert_eq!(channel_b.context.channel_type, expected_channel_type);
9821 fn test_rejects_implicit_simple_anchors() {
9822 // Tests that if `option_anchors` is being negotiated implicitly through the intersection of
9823 // each side's `InitFeatures`, it is rejected.
9824 let secp_ctx = Secp256k1::new();
9825 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9826 let network = Network::Testnet;
9827 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
9828 let logger = test_utils::TestLogger::new();
9830 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
9831 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
9833 let config = UserConfig::default();
9835 // See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
9836 let static_remote_key_required: u64 = 1 << 12;
9837 let simple_anchors_required: u64 = 1 << 20;
9838 let raw_init_features = static_remote_key_required | simple_anchors_required;
9839 let init_features_with_simple_anchors = InitFeatures::from_le_bytes(raw_init_features.to_le_bytes().to_vec());
9841 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9842 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9843 &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
9847 // Set `channel_type` to `None` to force the implicit feature negotiation.
9848 let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9849 open_channel_msg.channel_type = None;
9851 // Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
9852 // `static_remote_key`, it will fail the channel.
9853 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
9854 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9855 &channelmanager::provided_channel_type_features(&config), &init_features_with_simple_anchors,
9856 &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9858 assert!(channel_b.is_err());
9862 fn test_rejects_simple_anchors_channel_type() {
9863 // Tests that if `option_anchors` is being negotiated through the `channel_type` feature,
9865 let secp_ctx = Secp256k1::new();
9866 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9867 let network = Network::Testnet;
9868 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
9869 let logger = test_utils::TestLogger::new();
9871 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
9872 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
9874 let config = UserConfig::default();
9876 // See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
9877 let static_remote_key_required: u64 = 1 << 12;
9878 let simple_anchors_required: u64 = 1 << 20;
9879 let simple_anchors_raw_features = static_remote_key_required | simple_anchors_required;
9880 let simple_anchors_init = InitFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
9881 let simple_anchors_channel_type = ChannelTypeFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
9882 assert!(!simple_anchors_init.requires_unknown_bits());
9883 assert!(!simple_anchors_channel_type.requires_unknown_bits());
9885 // First, we'll try to open a channel between A and B where A requests a channel type for
9886 // the original `option_anchors` feature (non zero fee htlc tx). This should be rejected by
9887 // B as it's not supported by LDK.
9888 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9889 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9890 &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
9894 let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9895 open_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());
9897 let res = InboundV1Channel::<&TestKeysInterface>::new(
9898 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9899 &channelmanager::provided_channel_type_features(&config), &simple_anchors_init,
9900 &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9902 assert!(res.is_err());
9904 // Then, we'll try to open another channel where A requests a channel type for
9905 // `anchors_zero_fee_htlc_tx`. B is malicious and tries to downgrade the channel type to the
9906 // original `option_anchors` feature, which should be rejected by A as it's not supported by
9908 let mut channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9909 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init,
9910 10000000, 100000, 42, &config, 0, 42, None
9913 let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9915 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
9916 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9917 &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
9918 &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9921 let mut accept_channel_msg = channel_b.get_accept_channel_message();
9922 accept_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());
9924 let res = channel_a.accept_channel(
9925 &accept_channel_msg, &config.channel_handshake_limits, &simple_anchors_init
9927 assert!(res.is_err());
9931 fn test_waiting_for_batch() {
9932 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9933 let logger = test_utils::TestLogger::new();
9934 let secp_ctx = Secp256k1::new();
9935 let seed = [42; 32];
9936 let network = Network::Testnet;
9937 let best_block = BestBlock::from_network(network);
9938 let chain_hash = ChainHash::using_genesis_block(network);
9939 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9941 let mut config = UserConfig::default();
9942 // Set trust_own_funding_0conf while ensuring we don't send channel_ready for a
9943 // channel in a batch before all channels are ready.
9944 config.channel_handshake_limits.trust_own_funding_0conf = true;
9946 // Create a channel from node a to node b that will be part of batch funding.
9947 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9948 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(
9953 &channelmanager::provided_init_features(&config),
9963 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
9964 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9965 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(
9970 &channelmanager::provided_channel_type_features(&config),
9971 &channelmanager::provided_init_features(&config),
9977 true, // Allow node b to send a 0conf channel_ready.
9980 let accept_channel_msg = node_b_chan.accept_inbound_channel();
9981 node_a_chan.accept_channel(
9982 &accept_channel_msg,
9983 &config.channel_handshake_limits,
9984 &channelmanager::provided_init_features(&config),
9987 // Fund the channel with a batch funding transaction.
9988 let output_script = node_a_chan.context.get_funding_redeemscript();
9989 let tx = Transaction {
9991 lock_time: LockTime::ZERO,
9995 value: 10000000, script_pubkey: output_script.clone(),
9998 value: 10000000, script_pubkey: Builder::new().into_script(),
10001 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
10002 let funding_created_msg = node_a_chan.get_funding_created(
10003 tx.clone(), funding_outpoint, true, &&logger,
10004 ).map_err(|_| ()).unwrap();
10005 let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(
10006 &funding_created_msg.unwrap(),
10010 ).map_err(|_| ()).unwrap();
10011 let node_b_updates = node_b_chan.monitor_updating_restored(
10019 // Receive funding_signed, but the channel will be configured to hold sending channel_ready and
10020 // broadcasting the funding transaction until the batch is ready.
10021 let res = node_a_chan.funding_signed(
10022 &funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger,
10024 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
10025 let node_a_updates = node_a_chan.monitor_updating_restored(
10032 // Our channel_ready shouldn't be sent yet, even with trust_own_funding_0conf set,
10033 // as the funding transaction depends on all channels in the batch becoming ready.
10034 assert!(node_a_updates.channel_ready.is_none());
10035 assert!(node_a_updates.funding_broadcastable.is_none());
10036 assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
10038 // It is possible to receive a 0conf channel_ready from the remote node.
10039 node_a_chan.channel_ready(
10040 &node_b_updates.channel_ready.unwrap(),
10048 node_a_chan.context.channel_state,
10049 ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH | AwaitingChannelReadyFlags::THEIR_CHANNEL_READY)
10052 // Clear the ChannelState::WaitingForBatch only when called by ChannelManager.
10053 node_a_chan.set_batch_ready();
10054 assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY));
10055 assert!(node_a_chan.check_get_channel_ready(0).is_some());