1 // This file is Copyright its original authors, visible in version control
4 // This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
5 // or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
6 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
7 // You may not use this file except in accordance with one or both of these
10 use bitcoin::blockdata::constants::ChainHash;
11 use bitcoin::blockdata::script::{Script, ScriptBuf, Builder};
12 use bitcoin::blockdata::transaction::Transaction;
14 use bitcoin::sighash::EcdsaSighashType;
15 use bitcoin::consensus::encode;
17 use bitcoin::hashes::Hash;
18 use bitcoin::hashes::sha256::Hash as Sha256;
19 use bitcoin::hashes::sha256d::Hash as Sha256d;
20 use bitcoin::hash_types::{Txid, BlockHash};
22 use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
23 use bitcoin::secp256k1::{PublicKey,SecretKey};
24 use bitcoin::secp256k1::{Secp256k1,ecdsa::Signature};
25 use bitcoin::secp256k1;
27 use crate::ln::{ChannelId, PaymentPreimage, PaymentHash};
28 use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
30 use crate::ln::msgs::DecodeError;
31 use crate::ln::script::{self, ShutdownScript};
32 use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT, ChannelShutdownState};
33 use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
34 use crate::ln::chan_utils;
35 use crate::ln::onion_utils::HTLCFailReason;
36 use crate::chain::BestBlock;
37 use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
38 use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
39 use crate::chain::transaction::{OutPoint, TransactionData};
40 use crate::sign::ecdsa::{EcdsaChannelSigner, WriteableEcdsaChannelSigner};
41 use crate::sign::{EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
42 use crate::events::ClosureReason;
43 use crate::routing::gossip::NodeId;
44 use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
45 use crate::util::logger::{Logger, Record, WithContext};
46 use crate::util::errors::APIError;
47 use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure};
48 use crate::util::scid_utils::scid_from_parts;
51 use crate::prelude::*;
52 use core::{cmp,mem,fmt};
53 use core::convert::TryInto;
55 #[cfg(any(test, fuzzing, debug_assertions))]
56 use crate::sync::Mutex;
57 use crate::sign::type_resolver::ChannelSignerType;
59 use super::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint, RevocationBasepoint};
62 pub struct ChannelValueStat {
63 pub value_to_self_msat: u64,
64 pub channel_value_msat: u64,
65 pub channel_reserve_msat: u64,
66 pub pending_outbound_htlcs_amount_msat: u64,
67 pub pending_inbound_htlcs_amount_msat: u64,
68 pub holding_cell_outbound_amount_msat: u64,
69 pub counterparty_max_htlc_value_in_flight_msat: u64, // outgoing
70 pub counterparty_dust_limit_msat: u64,
73 pub struct AvailableBalances {
74 /// The amount that would go to us if we close the channel, ignoring any on-chain fees.
75 pub balance_msat: u64,
76 /// Total amount available for our counterparty to send to us.
77 pub inbound_capacity_msat: u64,
78 /// Total amount available for us to send to our counterparty.
79 pub outbound_capacity_msat: u64,
80 /// The maximum value we can assign to the next outbound HTLC
81 pub next_outbound_htlc_limit_msat: u64,
82 /// The minimum value we can assign to the next outbound HTLC
83 pub next_outbound_htlc_minimum_msat: u64,
86 #[derive(Debug, Clone, Copy, PartialEq)]
88 // Inbound states mirroring InboundHTLCState
90 AwaitingRemoteRevokeToAnnounce,
92 // Note that we do not have an `AwaitingAnnouncedRemoteRevoke` variant here as it is universally
92 // handled the same as `Committed`, with the only exception in `InboundHTLCState` being the
93 // distinction of when we allow ourselves to forward the HTLC. Because we aren't "forwarding"
94 // the fee update anywhere, we can simply consider the fee update `Committed` immediately
95 // instead of setting it to AwaitingAnnouncedRemoteRevoke.
97 // Outbound state can only be `LocalAnnounced` or `Committed`
101 enum InboundHTLCRemovalReason {
102 FailRelay(msgs::OnionErrorPacket),
103 FailMalformed(([u8; 32], u16)),
104 Fulfill(PaymentPreimage),
107 enum InboundHTLCState {
108 /// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an
109 /// update_add_htlc message for this HTLC.
110 RemoteAnnounced(PendingHTLCStatus),
111 /// Included in a received commitment_signed message (implying we've
112 /// revoke_and_ack'd it), but the remote hasn't yet revoked their previous
113 /// state (see the example below). We have not yet included this HTLC in a
114 /// commitment_signed message because we are waiting on the remote's
115 /// aforementioned state revocation. One reason this missing remote RAA
116 /// (revoke_and_ack) blocks us from constructing a commitment_signed message
117 /// is because every time we create a new "state", i.e. every time we sign a
118 /// new commitment tx (see [BOLT #2]), we need a new per_commitment_point,
119 /// which are provided one-at-a-time in each RAA. E.g., the last RAA they
120 /// sent provided the per_commitment_point for our current commitment tx.
121 /// The other reason we should not send a commitment_signed without their RAA
122 /// is because their RAA serves to ACK our previous commitment_signed.
124 /// Here's an example of how an HTLC could come to be in this state:
125 /// remote --> update_add_htlc(prev_htlc) --> local
126 /// remote --> commitment_signed(prev_htlc) --> local
127 /// remote <-- revoke_and_ack <-- local
128 /// remote <-- commitment_signed(prev_htlc) <-- local
129 /// [note that here, the remote does not respond with a RAA]
130 /// remote --> update_add_htlc(this_htlc) --> local
131 /// remote --> commitment_signed(prev_htlc, this_htlc) --> local
132 /// Now `this_htlc` will be assigned this state. It's unable to be officially
133 /// accepted, i.e. included in a commitment_signed, because we're missing the
134 /// RAA that provides our next per_commitment_point. The per_commitment_point
135 /// is used to derive commitment keys, which are used to construct the
136 /// signatures in a commitment_signed message.
137 /// Implies AwaitingRemoteRevoke.
139 /// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
140 AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus),
141 /// Included in a received commitment_signed message (implying we've revoke_and_ack'd it).
142 /// We have also included this HTLC in our latest commitment_signed and are now just waiting
143 /// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the
144 /// channel (before it can then get forwarded and/or removed).
145 /// Implies AwaitingRemoteRevoke.
146 AwaitingAnnouncedRemoteRevoke(PendingHTLCStatus),
148 /// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
149 /// created it we would have put it in the holding cell instead). When they next revoke_and_ack
151 /// Note that we have to keep an eye on the HTLC until we've received a broadcastable
152 /// commitment transaction without it as otherwise we'll have to force-close the channel to
153 /// claim it before the timeout (obviously doesn't apply to revoked HTLCs that we can't claim
154 /// anyway). That said, ChannelMonitor does this for us (see
155 /// ChannelMonitor::should_broadcast_holder_commitment_txn) so we actually remove the HTLC from
156 /// our own local state before then, once we're sure that the next commitment_signed and
157 /// ChannelMonitor::provide_latest_local_commitment_tx will not include this HTLC.
158 LocalRemoved(InboundHTLCRemovalReason),
161 struct InboundHTLCOutput {
165 payment_hash: PaymentHash,
166 state: InboundHTLCState,
169 #[cfg_attr(test, derive(Clone, Debug, PartialEq))]
170 enum OutboundHTLCState {
171 /// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
172 /// created it we would have put it in the holding cell instead). When they next revoke_and_ack
173 /// we will promote to Committed (note that they may not accept it until the next time we
174 /// revoke, but we don't really care about that:
175 /// * they've revoked, so worst case we can announce an old state and get our (option on)
176 /// money back (though we won't), and,
177 /// * we'll send them a revoke when they send a commitment_signed, and since only they're
178 /// allowed to remove it, the "can only be removed once committed on both sides" requirement
179 /// doesn't matter to us and it's up to them to enforce it, worst-case they jump ahead but
180 /// we'll never get out of sync).
181 /// Note that we Box the OnionPacket as it's rather large and we don't want to blow up
182 /// OutboundHTLCOutput's size just for a temporary bit
183 LocalAnnounced(Box<msgs::OnionPacket>),
185 /// Remote removed this (outbound) HTLC. We're waiting on their commitment_signed to finalize
186 /// the change (though they'll need to revoke before we fail the payment).
187 RemoteRemoved(OutboundHTLCOutcome),
188 /// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
189 /// the remote side hasn't yet revoked their previous state, which we need them to do before we
190 /// can do any backwards failing. Implies AwaitingRemoteRevoke.
191 /// We also have not yet removed this HTLC in a commitment_signed message, and are waiting on a
192 /// remote revoke_and_ack on a previous state before we can do so.
193 AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome),
194 /// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
195 /// the remote side hasn't yet revoked their previous state, which we need them to do before we
196 /// can do any backwards failing. Implies AwaitingRemoteRevoke.
197 /// We have removed this HTLC in our latest commitment_signed and are now just waiting on a
198 /// revoke_and_ack to drop completely.
199 AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome),
203 #[cfg_attr(test, derive(Debug, PartialEq))]
204 enum OutboundHTLCOutcome {
205 /// LDK version 0.0.105+ will always fill in the preimage here.
206 Success(Option<PaymentPreimage>),
207 Failure(HTLCFailReason),
210 impl From<Option<HTLCFailReason>> for OutboundHTLCOutcome {
211 fn from(o: Option<HTLCFailReason>) -> Self {
213 None => OutboundHTLCOutcome::Success(None),
214 Some(r) => OutboundHTLCOutcome::Failure(r)
219 impl<'a> Into<Option<&'a HTLCFailReason>> for &'a OutboundHTLCOutcome {
220 fn into(self) -> Option<&'a HTLCFailReason> {
222 OutboundHTLCOutcome::Success(_) => None,
223 OutboundHTLCOutcome::Failure(ref r) => Some(r)
228 #[cfg_attr(test, derive(Clone, Debug, PartialEq))]
229 struct OutboundHTLCOutput {
233 payment_hash: PaymentHash,
234 state: OutboundHTLCState,
236 blinding_point: Option<PublicKey>,
237 skimmed_fee_msat: Option<u64>,
240 /// See AwaitingRemoteRevoke ChannelState for more info
241 #[cfg_attr(test, derive(Clone, Debug, PartialEq))]
242 enum HTLCUpdateAwaitingACK {
243 AddHTLC { // TODO: Time out if we're getting close to cltv_expiry
247 payment_hash: PaymentHash,
249 onion_routing_packet: msgs::OnionPacket,
250 // The extra fee we're skimming off the top of this HTLC.
251 skimmed_fee_msat: Option<u64>,
252 blinding_point: Option<PublicKey>,
255 payment_preimage: PaymentPreimage,
260 err_packet: msgs::OnionErrorPacket,
265 sha256_of_onion: [u8; 32],
269 macro_rules! define_state_flags {
270 ($flag_type_doc: expr, $flag_type: ident, [$(($flag_doc: expr, $flag: ident, $value: expr)),+], $extra_flags: expr) => {
271 #[doc = $flag_type_doc]
272 #[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
273 struct $flag_type(u32);
278 const $flag: $flag_type = $flag_type($value);
281 /// All flags that apply to the specified [`ChannelState`] variant.
283 const ALL: $flag_type = Self($(Self::$flag.0 | )* $extra_flags);
286 fn new() -> Self { Self(0) }
289 fn from_u32(flags: u32) -> Result<Self, ()> {
290 if flags & !Self::ALL.0 != 0 {
293 Ok($flag_type(flags))
298 fn is_empty(&self) -> bool { self.0 == 0 }
301 fn is_set(&self, flag: Self) -> bool { *self & flag == flag }
304 impl core::ops::Not for $flag_type {
306 fn not(self) -> Self::Output { Self(!self.0) }
308 impl core::ops::BitOr for $flag_type {
310 fn bitor(self, rhs: Self) -> Self::Output { Self(self.0 | rhs.0) }
312 impl core::ops::BitOrAssign for $flag_type {
313 fn bitor_assign(&mut self, rhs: Self) { self.0 |= rhs.0; }
315 impl core::ops::BitAnd for $flag_type {
317 fn bitand(self, rhs: Self) -> Self::Output { Self(self.0 & rhs.0) }
319 impl core::ops::BitAndAssign for $flag_type {
320 fn bitand_assign(&mut self, rhs: Self) { self.0 &= rhs.0; }
323 ($flag_type_doc: expr, $flag_type: ident, $flags: tt) => {
324 define_state_flags!($flag_type_doc, $flag_type, $flags, 0);
326 ($flag_type_doc: expr, FUNDED_STATE, $flag_type: ident, $flags: tt) => {
327 define_state_flags!($flag_type_doc, $flag_type, $flags, FundedStateFlags::ALL.0);
328 impl core::ops::BitOr<FundedStateFlags> for $flag_type {
330 fn bitor(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 | rhs.0) }
332 impl core::ops::BitOrAssign<FundedStateFlags> for $flag_type {
333 fn bitor_assign(&mut self, rhs: FundedStateFlags) { self.0 |= rhs.0; }
335 impl core::ops::BitAnd<FundedStateFlags> for $flag_type {
337 fn bitand(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 & rhs.0) }
339 impl core::ops::BitAndAssign<FundedStateFlags> for $flag_type {
340 fn bitand_assign(&mut self, rhs: FundedStateFlags) { self.0 &= rhs.0; }
342 impl PartialEq<FundedStateFlags> for $flag_type {
343 fn eq(&self, other: &FundedStateFlags) -> bool { self.0 == other.0 }
345 impl From<FundedStateFlags> for $flag_type {
346 fn from(flags: FundedStateFlags) -> Self { Self(flags.0) }
351 /// We declare all the states/flags here together to help determine which bits are still available
354 pub const OUR_INIT_SENT: u32 = 1 << 0;
355 pub const THEIR_INIT_SENT: u32 = 1 << 1;
356 pub const FUNDING_NEGOTIATED: u32 = 1 << 2;
357 pub const AWAITING_CHANNEL_READY: u32 = 1 << 3;
358 pub const THEIR_CHANNEL_READY: u32 = 1 << 4;
359 pub const OUR_CHANNEL_READY: u32 = 1 << 5;
360 pub const CHANNEL_READY: u32 = 1 << 6;
361 pub const PEER_DISCONNECTED: u32 = 1 << 7;
362 pub const MONITOR_UPDATE_IN_PROGRESS: u32 = 1 << 8;
363 pub const AWAITING_REMOTE_REVOKE: u32 = 1 << 9;
364 pub const REMOTE_SHUTDOWN_SENT: u32 = 1 << 10;
365 pub const LOCAL_SHUTDOWN_SENT: u32 = 1 << 11;
366 pub const SHUTDOWN_COMPLETE: u32 = 1 << 12;
367 pub const WAITING_FOR_BATCH: u32 = 1 << 13;
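// Worked example (editor's note): the per-state flag bits are disjoint from the state bits, so a
// serialized state is simply the OR of both. A channel in `ChannelReady` (1 << 6 = 64) whose peer
// is disconnected (1 << 7 = 128) and which is awaiting a `revoke_and_ack` (1 << 9 = 512) would
// round-trip through `ChannelState::to_u32` as 64 | 128 | 512 = 704.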
371 "Flags that apply to all [`ChannelState`] variants in which the channel is funded.",
373 ("Indicates the remote side is considered \"disconnected\" and no updates are allowed \
374 until after we've done a `channel_reestablish` dance.", PEER_DISCONNECTED, state_flags::PEER_DISCONNECTED),
375 ("Indicates the user has told us a `ChannelMonitor` update is pending async persistence \
376 somewhere and we should pause sending any outbound messages until they've managed to \
377 complete it.", MONITOR_UPDATE_IN_PROGRESS, state_flags::MONITOR_UPDATE_IN_PROGRESS),
378 ("Indicates we received a `shutdown` message from the remote end. If set, they may not add \
379 any new HTLCs to the channel, and we are expected to respond with our own `shutdown` \
380 message when possible.", REMOTE_SHUTDOWN_SENT, state_flags::REMOTE_SHUTDOWN_SENT),
381 ("Indicates we sent a `shutdown` message. At this point, we may not add any new HTLCs to \
382 the channel.", LOCAL_SHUTDOWN_SENT, state_flags::LOCAL_SHUTDOWN_SENT)
387 "Flags that only apply to [`ChannelState::NegotiatingFunding`].",
388 NegotiatingFundingFlags, [
389 ("Indicates we have (or are prepared to) send our `open_channel`/`accept_channel` message.",
390 OUR_INIT_SENT, state_flags::OUR_INIT_SENT),
391 ("Indicates we have received their `open_channel`/`accept_channel` message.",
392 THEIR_INIT_SENT, state_flags::THEIR_INIT_SENT)
397 "Flags that only apply to [`ChannelState::AwaitingChannelReady`].",
398 FUNDED_STATE, AwaitingChannelReadyFlags, [
399 ("Indicates they sent us a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
400 `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
401 THEIR_CHANNEL_READY, state_flags::THEIR_CHANNEL_READY),
402 ("Indicates we sent them a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
403 `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
404 OUR_CHANNEL_READY, state_flags::OUR_CHANNEL_READY),
405 ("Indicates the channel was funded in a batch and the broadcast of the funding transaction \
406 is being held until all channels in the batch have received `funding_signed` and have \
407 their monitors persisted.", WAITING_FOR_BATCH, state_flags::WAITING_FOR_BATCH)
412 "Flags that only apply to [`ChannelState::ChannelReady`].",
413 FUNDED_STATE, ChannelReadyFlags, [
414 ("Indicates that we have sent a `commitment_signed` but are awaiting the responding \
415 `revoke_and_ack` message. During this period, we can't generate new `commitment_signed` \
416 messages as we'd be unable to determine which HTLCs they included in their `revoke_and_ack` \
417 implicit ACK, so instead we have to hold them away temporarily to be sent later.",
418 AWAITING_REMOTE_REVOKE, state_flags::AWAITING_REMOTE_REVOKE)
422 #[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
424 /// We are negotiating the parameters required for the channel prior to funding it.
425 NegotiatingFunding(NegotiatingFundingFlags),
426 /// We have sent `funding_created` and are awaiting a `funding_signed` to advance to
427 /// `AwaitingChannelReady`. Note that this is nonsensical for an inbound channel as we immediately generate
428 /// `funding_signed` upon receipt of `funding_created`, so simply skip this state.
430 /// We've received/sent `funding_created` and `funding_signed` and are thus now waiting on the
431 /// funding transaction to confirm.
432 AwaitingChannelReady(AwaitingChannelReadyFlags),
433 /// Both we and our counterparty consider the funding transaction confirmed and the channel is
435 ChannelReady(ChannelReadyFlags),
436 /// We've successfully negotiated a `closing_signed` dance. At this point, the `ChannelManager`
437 /// is about to drop us, but we store this anyway.
441 macro_rules! impl_state_flag {
442 ($get: ident, $set: ident, $clear: ident, $state_flag: expr, [$($state: ident),+]) => {
444 fn $get(&self) -> bool {
447 ChannelState::$state(flags) => flags.is_set($state_flag.into()),
456 ChannelState::$state(flags) => *flags |= $state_flag,
458 _ => debug_assert!(false, "Attempted to set flag on unexpected ChannelState"),
462 fn $clear(&mut self) {
465 ChannelState::$state(flags) => *flags &= !($state_flag),
467 _ => debug_assert!(false, "Attempted to clear flag on unexpected ChannelState"),
471 ($get: ident, $set: ident, $clear: ident, $state_flag: expr, FUNDED_STATES) => {
472 impl_state_flag!($get, $set, $clear, $state_flag, [AwaitingChannelReady, ChannelReady]);
474 ($get: ident, $set: ident, $clear: ident, $state_flag: expr, $state: ident) => {
475 impl_state_flag!($get, $set, $clear, $state_flag, [$state]);
480 fn from_u32(state: u32) -> Result<Self, ()> {
482 state_flags::FUNDING_NEGOTIATED => Ok(ChannelState::FundingNegotiated),
483 state_flags::SHUTDOWN_COMPLETE => Ok(ChannelState::ShutdownComplete),
485 if val & state_flags::AWAITING_CHANNEL_READY == state_flags::AWAITING_CHANNEL_READY {
486 AwaitingChannelReadyFlags::from_u32(val & !state_flags::AWAITING_CHANNEL_READY)
487 .map(|flags| ChannelState::AwaitingChannelReady(flags))
488 } else if val & state_flags::CHANNEL_READY == state_flags::CHANNEL_READY {
489 ChannelReadyFlags::from_u32(val & !state_flags::CHANNEL_READY)
490 .map(|flags| ChannelState::ChannelReady(flags))
491 } else if let Ok(flags) = NegotiatingFundingFlags::from_u32(val) {
492 Ok(ChannelState::NegotiatingFunding(flags))
500 fn to_u32(&self) -> u32 {
502 ChannelState::NegotiatingFunding(flags) => flags.0,
503 ChannelState::FundingNegotiated => state_flags::FUNDING_NEGOTIATED,
504 ChannelState::AwaitingChannelReady(flags) => state_flags::AWAITING_CHANNEL_READY | flags.0,
505 ChannelState::ChannelReady(flags) => state_flags::CHANNEL_READY | flags.0,
506 ChannelState::ShutdownComplete => state_flags::SHUTDOWN_COMPLETE,
510 fn is_pre_funded_state(&self) -> bool {
511 matches!(self, ChannelState::NegotiatingFunding(_)|ChannelState::FundingNegotiated)
514 fn is_both_sides_shutdown(&self) -> bool {
515 self.is_local_shutdown_sent() && self.is_remote_shutdown_sent()
518 fn with_funded_state_flags_mask(&self) -> FundedStateFlags {
520 ChannelState::AwaitingChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
521 ChannelState::ChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
522 _ => FundedStateFlags::new(),
526 fn should_force_holding_cell(&self) -> bool {
528 ChannelState::ChannelReady(flags) =>
529 flags.is_set(ChannelReadyFlags::AWAITING_REMOTE_REVOKE) ||
530 flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into()) ||
531 flags.is_set(FundedStateFlags::PEER_DISCONNECTED.into()),
533 debug_assert!(false, "The holding cell is only valid within ChannelReady");
539 impl_state_flag!(is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected,
540 FundedStateFlags::PEER_DISCONNECTED, FUNDED_STATES);
541 impl_state_flag!(is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress,
542 FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS, FUNDED_STATES);
543 impl_state_flag!(is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent,
544 FundedStateFlags::LOCAL_SHUTDOWN_SENT, FUNDED_STATES);
545 impl_state_flag!(is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent,
546 FundedStateFlags::REMOTE_SHUTDOWN_SENT, FUNDED_STATES);
547 impl_state_flag!(is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready,
548 AwaitingChannelReadyFlags::OUR_CHANNEL_READY, AwaitingChannelReady);
549 impl_state_flag!(is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready,
550 AwaitingChannelReadyFlags::THEIR_CHANNEL_READY, AwaitingChannelReady);
551 impl_state_flag!(is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch,
552 AwaitingChannelReadyFlags::WAITING_FOR_BATCH, AwaitingChannelReady);
553 impl_state_flag!(is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke,
554 ChannelReadyFlags::AWAITING_REMOTE_REVOKE, ChannelReady);
557 pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
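// Editor's note: commitment numbers count down from this value, so the first commitment
// transaction uses number 2^48 - 1 = 281_474_976_710_655 and each subsequent commitment
// decrements it, whereas the numbers used in transaction generation count up from 0 (see the
// comment on the commitment transaction number fields in `ChannelContext` below).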
559 pub const DEFAULT_MAX_HTLCS: u16 = 50;
561 pub(crate) fn commitment_tx_base_weight(channel_type_features: &ChannelTypeFeatures) -> u64 {
562 const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
563 const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124;
564 if channel_type_features.supports_anchors_zero_fee_htlc_tx() { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT }
568 const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
570 pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
572 pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;
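// Worked example (editor's note): a non-anchor commitment transaction carrying 5 non-dust HTLCs
// weighs 724 + 5 * 172 = 1_584 weight units, while the anchor variant weighs
// 1_124 + 5 * 172 = 1_984 weight units (the larger base weight covers, among other things, the
// two anchor outputs whose value is `ANCHOR_OUTPUT_VALUE_SATOSHI` each).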
574 /// The percentage of the channel value `holder_max_htlc_value_in_flight_msat` used to be set to,
575 /// before this was made configurable. The percentage was made configurable in LDK 0.0.107,
576 /// although LDK 0.0.104+ enabled serialization of channels with a different value set for
577 /// `holder_max_htlc_value_in_flight_msat`.
578 pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10;
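// Worked example (editor's note): under this legacy 10% default, a 1_000_000 sat channel sets
// `holder_max_htlc_value_in_flight_msat` to 1_000_000 * 1_000 * 10 / 100 = 100_000_000 msat.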
580 /// Maximum `funding_satoshis` value according to the BOLT #2 specification, if
581 /// `option_support_large_channel` (aka wumbo channels) is not supported.
583 pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;
585 /// Total bitcoin supply in satoshis.
586 pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 1_0000_0000;
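// Editor's note: the non-wumbo cap works out to 2^24 - 1 = 16_777_215 sats (just under
// 0.168 BTC), compared to the 2_100_000_000_000_000-sat total supply above.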
588 /// The maximum network dust limit for standard script formats. This currently represents the
589 /// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire
590 /// transaction non-standard and thus refuses to relay it.
591 /// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many
592 /// implementations use this value for their dust limit today.
593 pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546;
595 /// The maximum channel dust limit we will accept from our counterparty.
596 pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS;
598 /// The dust limit is used for both the commitment transaction outputs as well as the closing
599 /// transactions. For cooperative closing transactions, we require segwit outputs, though accept
600 /// *any* segwit scripts, which are allowed to be up to 42 bytes in length.
601 /// In order to avoid having to concern ourselves with standardness during the closing process, we
602 /// simply require our counterparty to use a dust limit which will leave any segwit output
604 /// See <https://github.com/lightning/bolts/issues/905> for more details.
605 pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;
607 // Just a reasonable implementation-specific safe lower bound, higher than the dust limit.
608 pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;
610 /// Used to return a simple Error back to ChannelManager. Will get converted to a
611 /// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
612 /// channel_id in ChannelManager.
613 pub(super) enum ChannelError {
619 impl fmt::Debug for ChannelError {
620 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
622 &ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
623 &ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
624 &ChannelError::Close(ref e) => write!(f, "Close : {}", e),
629 impl fmt::Display for ChannelError {
630 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
632 &ChannelError::Ignore(ref e) => write!(f, "{}", e),
633 &ChannelError::Warn(ref e) => write!(f, "{}", e),
634 &ChannelError::Close(ref e) => write!(f, "{}", e),
639 pub(super) struct WithChannelContext<'a, L: Deref> where L::Target: Logger {
641 pub peer_id: Option<PublicKey>,
642 pub channel_id: Option<ChannelId>,
645 impl<'a, L: Deref> Logger for WithChannelContext<'a, L> where L::Target: Logger {
646 fn log(&self, mut record: Record) {
647 record.peer_id = self.peer_id;
648 record.channel_id = self.channel_id;
649 self.logger.log(record)
653 impl<'a, 'b, L: Deref> WithChannelContext<'a, L>
654 where L::Target: Logger {
655 pub(super) fn from<S: Deref>(logger: &'a L, context: &'b ChannelContext<S>) -> Self
656 where S::Target: SignerProvider
660 peer_id: Some(context.counterparty_node_id),
661 channel_id: Some(context.channel_id),
666 macro_rules! secp_check {
667 ($res: expr, $err: expr) => {
670 Err(_) => return Err(ChannelError::Close($err)),
675 /// The "channel disabled" bit in channel_update must be set based on whether we are connected to
676 /// our counterparty or not. However, we don't want to announce updates right away to avoid
677 /// spamming the network with updates if the connection is flapping. Instead, we "stage" updates to
678 /// our channel_update message and track the current state here.
679 /// See implementation at [`super::channelmanager::ChannelManager::timer_tick_occurred`].
680 #[derive(Clone, Copy, PartialEq)]
681 pub(super) enum ChannelUpdateStatus {
682 /// We've announced the channel as enabled and are connected to our peer.
684 /// Our channel is no longer live, but we haven't announced the channel as disabled yet.
686 /// Our channel is live again, but we haven't announced the channel as enabled yet.
688 /// We've announced the channel as disabled.
692 /// We track when we sent an `AnnouncementSignatures` to our peer in a few states, described here.
694 pub enum AnnouncementSigsState {
695 /// We have not sent our peer an `AnnouncementSignatures` yet, or our peer disconnected since
696 /// we sent the last `AnnouncementSignatures`.
698 /// We sent an `AnnouncementSignatures` to our peer since the last time our peer disconnected.
699 /// This state never appears on disk - instead we write `NotSent`.
701 /// We sent a `CommitmentSigned` after the last `AnnouncementSignatures` we sent. Because we
702 /// only ever have a single `CommitmentSigned` pending at once, if we sent one after sending
703 /// `AnnouncementSignatures` then we know the peer received our `AnnouncementSignatures` if
704 /// they send back a `RevokeAndACK`.
705 /// This state never appears on disk - instead we write `NotSent`.
707 /// We received a `RevokeAndACK`, effectively ack-ing our `AnnouncementSignatures`, at this
708 /// point we no longer need to re-send our `AnnouncementSignatures` again on reconnect.
712 /// An enum indicating whether the local or remote side offered a given HTLC.
718 /// A struct gathering stats on pending HTLCs, either inbound or outbound side.
721 pending_htlcs_value_msat: u64,
722 on_counterparty_tx_dust_exposure_msat: u64,
723 on_holder_tx_dust_exposure_msat: u64,
724 holding_cell_msat: u64,
725 on_holder_tx_holding_cell_htlcs_count: u32, // dust HTLCs *non*-included
728 /// A struct gathering stats on a commitment transaction, either local or remote.
729 struct CommitmentStats<'a> {
730 tx: CommitmentTransaction, // the transaction info
731 feerate_per_kw: u32, // the feerate included to build the transaction
732 total_fee_sat: u64, // the total fee included in the transaction
733 num_nondust_htlcs: usize, // the number of HTLC outputs (dust HTLCs *non*-included)
734 htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction
735 local_balance_msat: u64, // local balance before fees but considering dust limits
736 remote_balance_msat: u64, // remote balance before fees but considering dust limits
737 outbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
738 inbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful received HTLCs since last commitment
741 /// Used when calculating whether we or the remote can afford an additional HTLC.
742 struct HTLCCandidate {
744 origin: HTLCInitiator,
748 fn new(amount_msat: u64, origin: HTLCInitiator) -> Self {
756 /// A return value enum for get_update_fulfill_htlc. See UpdateFulfillCommitFetch variants for
758 enum UpdateFulfillFetch {
760 monitor_update: ChannelMonitorUpdate,
761 htlc_value_msat: u64,
762 msg: Option<msgs::UpdateFulfillHTLC>,
767 /// The return type of get_update_fulfill_htlc_and_commit.
768 pub enum UpdateFulfillCommitFetch {
769 /// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
770 /// it in the holding cell, or re-generated the update_fulfill message after the same claim was
771 /// previously placed in the holding cell (and has since been removed).
773 /// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
774 monitor_update: ChannelMonitorUpdate,
775 /// The value of the HTLC which was claimed, in msat.
776 htlc_value_msat: u64,
778 /// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
779 /// or has been forgotten (presumably previously claimed).
783 /// The return value of `monitor_updating_restored`
784 pub(super) struct MonitorRestoreUpdates {
785 pub raa: Option<msgs::RevokeAndACK>,
786 pub commitment_update: Option<msgs::CommitmentUpdate>,
787 pub order: RAACommitmentOrder,
788 pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
789 pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
790 pub finalized_claimed_htlcs: Vec<HTLCSource>,
791 pub funding_broadcastable: Option<Transaction>,
792 pub channel_ready: Option<msgs::ChannelReady>,
793 pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
796 /// The return value of `signer_maybe_unblocked`
798 pub(super) struct SignerResumeUpdates {
799 pub commitment_update: Option<msgs::CommitmentUpdate>,
800 pub funding_signed: Option<msgs::FundingSigned>,
801 pub channel_ready: Option<msgs::ChannelReady>,
804 /// The return value of `channel_reestablish`
805 pub(super) struct ReestablishResponses {
806 pub channel_ready: Option<msgs::ChannelReady>,
807 pub raa: Option<msgs::RevokeAndACK>,
808 pub commitment_update: Option<msgs::CommitmentUpdate>,
809 pub order: RAACommitmentOrder,
810 pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
811 pub shutdown_msg: Option<msgs::Shutdown>,
814 /// The result of a shutdown that should be handled.
816 pub(crate) struct ShutdownResult {
817 pub(crate) closure_reason: ClosureReason,
818 /// A channel monitor update to apply.
819 pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
820 /// A list of dropped outbound HTLCs that can safely be failed backwards immediately.
821 pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
822 /// An unbroadcasted batch funding transaction id. The closure of this channel should be
823 /// propagated to the remainder of the batch.
824 pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
825 pub(crate) channel_id: ChannelId,
826 pub(crate) user_channel_id: u128,
827 pub(crate) channel_capacity_satoshis: u64,
828 pub(crate) counterparty_node_id: PublicKey,
829 pub(crate) unbroadcasted_funding_tx: Option<Transaction>,
832 /// If the majority of the channel's funds are to the fundee and the initiator holds only just
833 /// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
834 /// initiator controls the feerate, if they then go to increase the channel fee, they may have no
835 /// balance but the fundee is unable to send a payment as the increase in fee more than drains
836 /// their reserve value. Thus, neither side can send a new HTLC and the channel becomes useless.
837 /// Thus, before sending an HTLC when we are the initiator, we check that the feerate can increase
838 /// by this multiple without hitting this case, before sending.
839 /// This multiple is effectively the maximum feerate "jump" we expect until more HTLCs flow over
840 /// the channel. Sadly, there isn't really a good number for this - if we expect to have no new
841 /// HTLCs for days we may need this to suffice for feerate increases across days, but that may
842 /// leave the channel less usable as we hold a bigger reserve.
843 #[cfg(any(fuzzing, test))]
844 pub const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
845 #[cfg(not(any(fuzzing, test)))]
846 const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
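// Worked example (editor's note): with the current feerate at 2_500 sat/kW, an outbound HTLC is
// only sent if the commitment fee would still be affordable at
// 2_500 * FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE = 5_000 sat/kW, leaving headroom for one
// doubling of the feerate before the channel risks becoming unusable.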
848 /// If we fail to see a funding transaction confirmed on-chain within this many blocks after the
849 /// channel creation on an inbound channel, we simply force-close and move on.
850 /// This constant is the one suggested in BOLT 2.
851 pub(crate) const FUNDING_CONF_DEADLINE_BLOCKS: u32 = 2016;
853 /// In case of a concurrent update_add_htlc proposed by our counterparty, we might
854 /// not have enough balance value remaining to cover the onchain cost of this new
855 /// HTLC weight. If this happens, our counterparty fails the reception of our
856 /// commitment_signed including this new HTLC due to infringement on the channel
858 /// To prevent this case, we compute our outbound update_fee with an HTLC buffer of
859 /// size 2. However, if the number of concurrent update_add_htlc is higher, this still
860 /// leads to a channel force-close. Ultimately, this is an issue coming from the
861 /// design of LN state machines, allowing asynchronous updates.
862 pub(crate) const CONCURRENT_INBOUND_HTLC_FEE_BUFFER: u32 = 2;
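// Worked example (editor's note): with a buffer of 2, an outbound `update_fee` to 2_500 sat/kW
// budgets roughly 2 * 172 * 2_500 / 1_000 = 860 sats of extra commitment fee, so a single
// concurrent `update_add_htlc` from our counterparty does not push us over the channel reserve.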
864 /// When a channel is opened, we check that the funding amount is enough to pay for relevant
865 /// commitment transaction fees, with at least this many HTLCs present on the commitment
866 /// transaction (not counting the value of the HTLCs themselves).
867 pub(crate) const MIN_AFFORDABLE_HTLC_COUNT: usize = 4;
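// Worked example (editor's note): for a non-anchor channel opened at 2_500 sat/kW, the funder
// must be able to pay for at least 724 + 4 * 172 = 1_412 weight units of commitment transaction,
// i.e. 1_412 * 2_500 / 1_000 = 3_530 sats of fee, in addition to any required channel reserve.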
869 /// When a [`Channel`] has its [`ChannelConfig`] updated, its existing one is stashed for up to this
870 /// number of ticks to allow forwarding HTLCs by nodes that have yet to receive the new
871 /// ChannelUpdate prompted by the config update. This value was determined as follows:
873 /// * The expected interval between ticks (1 minute).
874 /// * The average convergence delay of updates across the network, i.e., ~300 seconds on average
875 /// for a node to see an update as seen on `<https://arxiv.org/pdf/2205.12737.pdf>`.
876 /// * `EXPIRE_PREV_CONFIG_TICKS` = convergence_delay / tick_interval
877 pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;
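// Worked example (editor's note): with ~300 seconds of expected gossip convergence and one tick
// per minute, EXPIRE_PREV_CONFIG_TICKS = 300 / 60 = 5, so a superseded `ChannelConfig` keeps
// being honored for roughly five more minutes of forwarding before it is dropped.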
879 /// The number of ticks that may elapse while we're waiting for a response to a
880 /// [`msgs::RevokeAndACK`] or [`msgs::ChannelReestablish`] message before we attempt to disconnect
883 /// See [`ChannelContext::sent_message_awaiting_response`] for more information.
884 pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;
886 /// The number of ticks that may elapse while we're waiting for an unfunded outbound/inbound channel
887 /// to be promoted to a [`Channel`] since the unfunded channel was created. An unfunded channel
888 /// exceeding this age limit will be force-closed and purged from memory.
889 pub(crate) const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;
891 /// Number of blocks needed for an output from a coinbase transaction to be spendable.
892 pub(crate) const COINBASE_MATURITY: u32 = 100;
894 struct PendingChannelMonitorUpdate {
895 update: ChannelMonitorUpdate,
898 impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
899 (0, update, required),
902 /// The `ChannelPhase` enum describes the current phase in life of a lightning channel with each of
903 /// its variants containing an appropriate channel struct.
904 pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
905 UnfundedOutboundV1(OutboundV1Channel<SP>),
906 UnfundedInboundV1(InboundV1Channel<SP>),
910 impl<'a, SP: Deref> ChannelPhase<SP> where
911 SP::Target: SignerProvider,
912 <SP::Target as SignerProvider>::EcdsaSigner: ChannelSigner,
914 pub fn context(&'a self) -> &'a ChannelContext<SP> {
916 ChannelPhase::Funded(chan) => &chan.context,
917 ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
918 ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
922 pub fn context_mut(&'a mut self) -> &'a mut ChannelContext<SP> {
924 ChannelPhase::Funded(ref mut chan) => &mut chan.context,
925 ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
926 ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
931 /// Contains all state common to unfunded inbound/outbound channels.
932 pub(super) struct UnfundedChannelContext {
933 /// A counter tracking how many ticks have elapsed since this unfunded channel was
934 /// created. If the peer has yet to respond after this counter reaches
935 /// `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS`, it will be force-closed and purged from memory.
937 /// This is so that we don't keep channels around that haven't progressed to a funded state
938 /// in a timely manner.
939 unfunded_channel_age_ticks: usize,
942 impl UnfundedChannelContext {
943 /// Determines whether we should force-close and purge this unfunded channel from memory due to it
944 /// having reached the unfunded channel age limit.
946 /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
947 pub fn should_expire_unfunded_channel(&mut self) -> bool {
948 self.unfunded_channel_age_ticks += 1;
949 self.unfunded_channel_age_ticks >= UNFUNDED_CHANNEL_AGE_LIMIT_TICKS
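// Illustrative usage sketch (editor's note; field and variable names here are hypothetical, the
// real call site lives in `ChannelManager::timer_tick_occurred`): called once per one-minute
// tick, this returns true on the 60th tick, i.e. roughly an hour after the unfunded channel was
// created.
//
// if unfunded_chan.unfunded_context.should_expire_unfunded_channel() {
//     // force-close and purge the unfunded channel from memory
// }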
953 /// Contains everything about the channel including state, and various flags.
954 pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
955 config: LegacyChannelConfig,
957 // Track the previous `ChannelConfig` so that we can continue forwarding HTLCs that were
958 // constructed using it. The second element in the tuple corresponds to the number of ticks that
959 // have elapsed since the update occurred.
960 prev_config: Option<(ChannelConfig, usize)>,
962 inbound_handshake_limits_override: Option<ChannelHandshakeLimits>,
966 /// The current channel ID.
967 channel_id: ChannelId,
968 /// The temporary channel ID used during channel setup. Value kept even after transitioning to a final channel ID.
969 /// Will be `None` for channels created prior to 0.0.115.
970 temporary_channel_id: Option<ChannelId>,
971 channel_state: ChannelState,
973 // When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
974 // our peer. However, we want to make sure they received it, or else rebroadcast it when we
976 // We do so here, see `AnnouncementSigsSent` for more details on the state(s).
977 // Note that a number of our tests were written prior to the behavior here which retransmits
978 // AnnouncementSignatures until after an RAA completes, so the behavior is short-circuited in
980 #[cfg(any(test, feature = "_test_utils"))]
981 pub(crate) announcement_sigs_state: AnnouncementSigsState,
982 #[cfg(not(any(test, feature = "_test_utils")))]
983 announcement_sigs_state: AnnouncementSigsState,
985 secp_ctx: Secp256k1<secp256k1::All>,
986 channel_value_satoshis: u64,
988 latest_monitor_update_id: u64,
990 holder_signer: ChannelSignerType<SP>,
991 shutdown_scriptpubkey: Option<ShutdownScript>,
992 destination_script: ScriptBuf,
994 // Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction
995 // generation start at 0 and count up...this simplifies some parts of implementation at the
996 // cost of others, but should really just be changed.
998 cur_holder_commitment_transaction_number: u64,
999 cur_counterparty_commitment_transaction_number: u64,
1000 value_to_self_msat: u64, // Excluding all pending_htlcs, fees, and anchor outputs
1001 pending_inbound_htlcs: Vec<InboundHTLCOutput>,
1002 pending_outbound_htlcs: Vec<OutboundHTLCOutput>,
1003 holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,
1005 /// When resending CS/RAA messages on channel monitor restoration or on reconnect, we always
1006 /// need to ensure we resend them in the order we originally generated them. Note that because
1007 /// there can only ever be one in-flight CS and/or one in-flight RAA at any time, it is
1008 /// sufficient to simply set this to the opposite of any message we are generating as we
1009 /// generate it. ie when we generate a CS, we set this to RAAFirst as, if there is a pending
1010 /// in-flight RAA to resend, it will have been the first thing we generated, and thus we should
1012 resend_order: RAACommitmentOrder,
1014 monitor_pending_channel_ready: bool,
1015 monitor_pending_revoke_and_ack: bool,
1016 monitor_pending_commitment_signed: bool,
1018 // TODO: If a channel is drop'd, we don't know whether the `ChannelMonitor` is ultimately
1019 // responsible for some of the HTLCs here or not - we don't know whether the update in question
1020 // completed or not. We currently ignore these fields entirely when force-closing a channel,
1021 // but need to handle this somehow or we run the risk of losing HTLCs!
1022 monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
1023 monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
1024 monitor_pending_finalized_fulfills: Vec<HTLCSource>,
1026 /// If we went to send a commitment update (ie some messages then [`msgs::CommitmentSigned`])
1027 /// but our signer (initially) refused to give us a signature, we should retry at some point in
1028 /// the future when the signer indicates it may have a signature for us.
1030 /// This flag is set in such a case. Note that we don't need to persist this as we'll end up
1031 /// setting it again as a side-effect of [`Channel::channel_reestablish`].
1032 signer_pending_commitment_update: bool,
1033 /// Similar to [`Self::signer_pending_commitment_update`] but we're waiting to send either a
1034 /// [`msgs::FundingCreated`] or [`msgs::FundingSigned`] depending on if this channel is
1035 /// outbound or inbound.
1036 signer_pending_funding: bool,
1038 // pending_update_fee is filled when sending and receiving update_fee.
1040 // Because it follows the same commitment flow as HTLCs, `FeeUpdateState` is either `Outbound`
1041 // or matches a subset of the `InboundHTLCOutput` variants. It is then updated/used when
1042 // generating new commitment transactions with exactly the same criteria as inbound/outbound
1043 // HTLCs with similar state.
1044 pending_update_fee: Option<(u32, FeeUpdateState)>,
1045 // If a `send_update_fee()` call is made with ChannelState::AwaitingRemoteRevoke set, we place
1046 // it here instead of `pending_update_fee` in the same way as we place outbound HTLC updates in
1047 // `holding_cell_htlc_updates` instead of `pending_outbound_htlcs`. It is released into
1048 // `pending_update_fee` with the same criteria as outbound HTLC updates but can be updated by
1049 // further `send_update_fee` calls, dropping the previous holding cell update entirely.
1050 holding_cell_update_fee: Option<u32>,
1051 next_holder_htlc_id: u64,
1052 next_counterparty_htlc_id: u64,
1053 feerate_per_kw: u32,
1055 /// The timestamp set on our latest `channel_update` message for this channel. It is updated
1056 /// when the channel is updated in ways which may impact the `channel_update` message or when a
1057 /// new block is received, ensuring it's always at least moderately close to the current real
1059 update_time_counter: u32,
1061 #[cfg(debug_assertions)]
1062 /// Max to_local and to_remote outputs in a locally-generated commitment transaction
1063 holder_max_commitment_tx_output: Mutex<(u64, u64)>,
1064 #[cfg(debug_assertions)]
1065 /// Max to_local and to_remote outputs in a remote-generated commitment transaction
1066 counterparty_max_commitment_tx_output: Mutex<(u64, u64)>,
1068 last_sent_closing_fee: Option<(u64, Signature)>, // (fee, holder_sig)
1069 target_closing_feerate_sats_per_kw: Option<u32>,
1071 /// If our counterparty sent us a closing_signed while we were waiting for a `ChannelMonitor`
1072 /// update, we need to delay processing it until later. We do that here by simply storing the
1073 /// closing_signed message and handling it in `maybe_propose_closing_signed`.
1074 pending_counterparty_closing_signed: Option<msgs::ClosingSigned>,
1076 /// The minimum and maximum absolute fee, in satoshis, we are willing to place on the closing
1077 /// transaction. These are set once we reach `closing_negotiation_ready`.
1079 pub(crate) closing_fee_limits: Option<(u64, u64)>,
1081 closing_fee_limits: Option<(u64, u64)>,
1083 /// If we remove an HTLC (or fee update), commit, and receive our counterparty's
1084 /// `revoke_and_ack`, we remove all knowledge of said HTLC (or fee update). However, the latest
1085 /// local commitment transaction that we can broadcast still contains the HTLC (or old fee)
1086 /// until we receive a further `commitment_signed`. Thus we are not eligible for initiating the
1087 /// `closing_signed` negotiation if we're expecting a counterparty `commitment_signed`.
1089 /// To ensure we don't send a `closing_signed` too early, we track this state here, waiting
1090 /// until we see a `commitment_signed` before doing so.
1092 /// We don't bother to persist this - we anticipate this state won't last longer than a few
1093 /// milliseconds, so any accidental force-closes here should be exceedingly rare.
1094 expecting_peer_commitment_signed: bool,
1096 /// The hash of the block in which the funding transaction was included.
1097 funding_tx_confirmed_in: Option<BlockHash>,
1098 funding_tx_confirmation_height: u32,
1099 short_channel_id: Option<u64>,
1100 /// Either the height at which this channel was created or the height at which it was last
1101 /// serialized if it was serialized by versions prior to 0.0.103.
1102 /// We use this to close if funding is never broadcasted.
1103 channel_creation_height: u32,
1105 counterparty_dust_limit_satoshis: u64,
1108 pub(super) holder_dust_limit_satoshis: u64,
1110 holder_dust_limit_satoshis: u64,
1113 pub(super) counterparty_max_htlc_value_in_flight_msat: u64,
1115 counterparty_max_htlc_value_in_flight_msat: u64,
1118 pub(super) holder_max_htlc_value_in_flight_msat: u64,
1120 holder_max_htlc_value_in_flight_msat: u64,
1122 /// minimum channel reserve for self to maintain - set by them.
1123 counterparty_selected_channel_reserve_satoshis: Option<u64>,
1126 pub(super) holder_selected_channel_reserve_satoshis: u64,
1128 holder_selected_channel_reserve_satoshis: u64,
1130 counterparty_htlc_minimum_msat: u64,
1131 holder_htlc_minimum_msat: u64,
1133 pub counterparty_max_accepted_htlcs: u16,
1135 counterparty_max_accepted_htlcs: u16,
1136 holder_max_accepted_htlcs: u16,
1137 minimum_depth: Option<u32>,
1139 counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,
1141 pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
1142 funding_transaction: Option<Transaction>,
1143 is_batch_funding: Option<()>,
1145 counterparty_cur_commitment_point: Option<PublicKey>,
1146 counterparty_prev_commitment_point: Option<PublicKey>,
1147 counterparty_node_id: PublicKey,
1149 counterparty_shutdown_scriptpubkey: Option<ScriptBuf>,
1151 commitment_secrets: CounterpartyCommitmentSecrets,
1153 channel_update_status: ChannelUpdateStatus,
1154 /// Once we reach `closing_negotiation_ready`, we set this, indicating if closing_signed does
1155 /// not complete within a single timer tick (one minute), we should force-close the channel.
1156 /// This prevents us from keeping unusable channels around forever if our counterparty wishes
1158 /// Note that this field is reset to false on deserialization to give us a chance to connect to
1159 /// our peer and start the closing_signed negotiation fresh.
1160 closing_signed_in_flight: bool,
1162 /// Our counterparty's channel_announcement signatures provided in announcement_signatures.
1163 /// This can be used to rebroadcast the channel_announcement message later.
1164 announcement_sigs: Option<(Signature, Signature)>,
1166 // We save these values so we can make sure `next_local_commit_tx_fee_msat` and
1167 // `next_remote_commit_tx_fee_msat` properly predict what the next commitment transaction fee will
1168 // be, by comparing the cached values to the fee of the transaction generated by
1169 // `build_commitment_transaction`.
1170 #[cfg(any(test, fuzzing))]
1171 next_local_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
1172 #[cfg(any(test, fuzzing))]
1173 next_remote_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
1175 /// lnd has a long-standing bug where, upon reconnection, if the channel is not yet confirmed
1176 /// they will not send a channel_reestablish until the channel locks in. Then, they will send a
1177 /// channel_ready *before* sending the channel_reestablish (which is clearly a violation of
1178 /// the BOLT specs). We copy c-lightning's workaround here and simply store the channel_ready
1179 /// message until we receive a channel_reestablish.
1181 /// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
1182 pub workaround_lnd_bug_4006: Option<msgs::ChannelReady>,
1184 /// An option set when we wish to track how many ticks have elapsed while waiting for a response
1185 /// from our counterparty after sending a message. If the peer has yet to respond after reaching
1186 /// `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`, a reconnection should be attempted to try to
1187 /// unblock the state machine.
1189 /// This behavior is mostly motivated by a lnd bug in which we don't receive a message we expect
1190 /// to in a timely manner, which may lead to channels becoming unusable and/or force-closed. An
1191 /// example of such can be found at <https://github.com/lightningnetwork/lnd/issues/7682>.
1193 /// This is currently only used when waiting for a [`msgs::ChannelReestablish`] or
1194 /// [`msgs::RevokeAndACK`] message from the counterparty.
1195 sent_message_awaiting_response: Option<usize>,
1197 #[cfg(any(test, fuzzing))]
1198 // When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
1199 // corresponding HTLC on the inbound path. If, then, the outbound path channel is
1200 // disconnected and reconnected (before we've exchange commitment_signed and revoke_and_ack
1201 // messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This
1202 // is fine, but as a sanity check in our failure to generate the second claim, we check here
1203 // that the original was a claim, and that we aren't now trying to fulfill a failed HTLC.
1204 historical_inbound_htlc_fulfills: HashSet<u64>,
1206 /// This channel's type, as negotiated during channel open
1207 channel_type: ChannelTypeFeatures,
1209 // Our counterparty can offer us SCID aliases which they will map to this channel when routing
1210 // outbound payments. These can be used in invoice route hints to avoid explicitly revealing
1211 // the channel's funding UTXO.
1213 // We also use this when sending our peer a channel_update that isn't to be broadcasted
1214 // publicly - allowing them to re-use their map of SCID -> channel for channel_update ->
1215 // associated channel mapping.
1217 // We only bother storing the most recent SCID alias at any time, though our counterparty has
1218 // to store all of them.
1219 latest_inbound_scid_alias: Option<u64>,
1221 // We always offer our counterparty a static SCID alias, which we recognize as for this channel
1222 // if we see it in HTLC forwarding instructions. We don't bother rotating the alias given we
1223 // don't currently support node id aliases and eventually privacy should be provided with
1224 // blinded paths instead of simple scid+node_id aliases.
1225 outbound_scid_alias: u64,
1227 // We track whether we already emitted a `ChannelPending` event.
1228 channel_pending_event_emitted: bool,
1230 // We track whether we already emitted a `ChannelReady` event.
1231 channel_ready_event_emitted: bool,
1233 /// The unique identifier used to re-derive the private key material for the channel through
1234 /// [`SignerProvider::derive_channel_signer`].
1235 channel_keys_id: [u8; 32],
1237 /// If we can't release a [`ChannelMonitorUpdate`] until some external action completes, we
1238 /// store it here and only release it to the `ChannelManager` once it asks for it.
1239 blocked_monitor_updates: Vec<PendingChannelMonitorUpdate>,
1242 impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
1243 /// Allowed in any state (including after shutdown)
1244 pub fn get_update_time_counter(&self) -> u32 {
1245 self.update_time_counter
1248 pub fn get_latest_monitor_update_id(&self) -> u64 {
1249 self.latest_monitor_update_id
1252 pub fn should_announce(&self) -> bool {
1253 self.config.announced_channel
1256 pub fn is_outbound(&self) -> bool {
1257 self.channel_transaction_parameters.is_outbound_from_holder
1260 /// Gets the fee we'd want to charge for adding an HTLC output to this Channel
1261 /// Allowed in any state (including after shutdown)
1262 pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 {
1263 self.config.options.forwarding_fee_base_msat
1266 /// Returns true if we've ever received a message from the remote end for this Channel
1267 pub fn have_received_message(&self) -> bool {
1268 self.channel_state > ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT)
1271 /// Returns true if this channel is fully established and not known to be closing.
1272 /// Allowed in any state (including after shutdown)
1273 pub fn is_usable(&self) -> bool {
1274 matches!(self.channel_state, ChannelState::ChannelReady(_)) &&
1275 !self.channel_state.is_local_shutdown_sent() &&
1276 !self.channel_state.is_remote_shutdown_sent() &&
1277 !self.monitor_pending_channel_ready
1280 /// Returns the state of the channel in its various stages of shutdown.
1281 pub fn shutdown_state(&self) -> ChannelShutdownState {
1282 match self.channel_state {
1283 ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_) =>
1284 if self.channel_state.is_local_shutdown_sent() && !self.channel_state.is_remote_shutdown_sent() {
1285 ChannelShutdownState::ShutdownInitiated
1286 } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && !self.closing_negotiation_ready() {
1287 ChannelShutdownState::ResolvingHTLCs
1288 } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && self.closing_negotiation_ready() {
1289 ChannelShutdownState::NegotiatingClosingFee
1291 ChannelShutdownState::NotShuttingDown
1293 ChannelState::ShutdownComplete => ChannelShutdownState::ShutdownComplete,
1294 _ => ChannelShutdownState::NotShuttingDown,
1298 fn closing_negotiation_ready(&self) -> bool {
1299 let is_ready_to_close = match self.channel_state {
1300 ChannelState::AwaitingChannelReady(flags) =>
1301 flags & FundedStateFlags::ALL == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
1302 ChannelState::ChannelReady(flags) =>
1303 flags == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
1306 self.pending_inbound_htlcs.is_empty() &&
1307 self.pending_outbound_htlcs.is_empty() &&
1308 self.pending_update_fee.is_none() &&
1312 /// Returns true if this channel is currently available for use. This performs all the checks of
1313 /// is_usable() and additionally considers things like the channel being temporarily disabled.
1314 /// Allowed in any state (including after shutdown)
1315 pub fn is_live(&self) -> bool {
1316 self.is_usable() && !self.channel_state.is_peer_disconnected()
1319 // Public utilities:
1321 pub fn channel_id(&self) -> ChannelId {
1325 // Return the `temporary_channel_id` used during channel establishment.
1327 // Will return `None` for channels created prior to LDK version 0.0.115.
1328 pub fn temporary_channel_id(&self) -> Option<ChannelId> {
1329 self.temporary_channel_id
1332 pub fn minimum_depth(&self) -> Option<u32> {
1336 /// Gets the "user_id" value passed into the construction of this channel. It has no special
1337 /// meaning and exists only to allow users to have a persistent identifier of a channel.
1338 pub fn get_user_id(&self) -> u128 {
1342 /// Gets the channel's type
1343 pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
1347 /// Gets the channel's `short_channel_id`.
1349 /// Will return `None` if the channel hasn't been confirmed yet.
1350 pub fn get_short_channel_id(&self) -> Option<u64> {
1351 self.short_channel_id
1354 /// Allowed in any state (including after shutdown)
1355 pub fn latest_inbound_scid_alias(&self) -> Option<u64> {
1356 self.latest_inbound_scid_alias
1359 /// Allowed in any state (including after shutdown)
1360 pub fn outbound_scid_alias(&self) -> u64 {
1361 self.outbound_scid_alias
1364 /// Returns the holder signer for this channel.
1366 pub fn get_signer(&self) -> &ChannelSignerType<SP> {
1367 return &self.holder_signer
1370 /// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
1371 /// indicating we were written by LDK prior to 0.0.106 which did not set outbound SCID aliases
1372 /// or prior to any channel actions during `Channel` initialization.
1373 pub fn set_outbound_scid_alias(&mut self, outbound_scid_alias: u64) {
1374 debug_assert_eq!(self.outbound_scid_alias, 0);
1375 self.outbound_scid_alias = outbound_scid_alias;
1378 /// Returns the funding_txo we either got from our peer, or were given by
1379 /// get_funding_created.
1380 pub fn get_funding_txo(&self) -> Option<OutPoint> {
1381 self.channel_transaction_parameters.funding_outpoint
1384 /// Returns the height in which our funding transaction was confirmed.
1385 pub fn get_funding_tx_confirmation_height(&self) -> Option<u32> {
1386 let conf_height = self.funding_tx_confirmation_height;
1387 if conf_height > 0 {
1394 /// Returns the block hash in which our funding transaction was confirmed.
1395 pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
1396 self.funding_tx_confirmed_in
1399 /// Returns the current number of confirmations on the funding transaction.
1400 pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
1401 if self.funding_tx_confirmation_height == 0 {
1402 // We either haven't seen any confirmation yet, or observed a reorg.
1406 height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1)
1409 fn get_holder_selected_contest_delay(&self) -> u16 {
1410 self.channel_transaction_parameters.holder_selected_contest_delay
1413 fn get_holder_pubkeys(&self) -> &ChannelPublicKeys {
1414 &self.channel_transaction_parameters.holder_pubkeys
1417 pub fn get_counterparty_selected_contest_delay(&self) -> Option<u16> {
1418 self.channel_transaction_parameters.counterparty_parameters
1419 .as_ref().map(|params| params.selected_contest_delay)
1422 fn get_counterparty_pubkeys(&self) -> &ChannelPublicKeys {
1423 &self.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys
1426 /// Allowed in any state (including after shutdown)
1427 pub fn get_counterparty_node_id(&self) -> PublicKey {
1428 self.counterparty_node_id
1431 /// Allowed in any state (including after shutdown)
1432 pub fn get_holder_htlc_minimum_msat(&self) -> u64 {
1433 self.holder_htlc_minimum_msat
1436 /// Allowed in any state (including after shutdown), but will return `None` before TheirInitSent
1437 pub fn get_holder_htlc_maximum_msat(&self) -> Option<u64> {
1438 self.get_htlc_maximum_msat(self.holder_max_htlc_value_in_flight_msat)
1441 /// Allowed in any state (including after shutdown)
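/// # Example
///
/// A minimal arithmetic sketch with hypothetical numbers (not values taken from any real
/// channel): a 1_000_000 sat channel whose counterparty allows at most 500_000_000 msat in
/// flight announces the smaller of 90% of capacity and that in-flight limit:
///
/// ```ignore
/// let ninety_percent_of_capacity_msat = 1_000_000_u64 * 1000 * 9 / 10; // 900_000_000 msat
/// let counterparty_max_in_flight_msat = 500_000_000_u64;
/// assert_eq!(core::cmp::min(ninety_percent_of_capacity_msat, counterparty_max_in_flight_msat), 500_000_000);
/// ```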
1442 pub fn get_announced_htlc_max_msat(&self) -> u64 {
1444 // Upper bound by capacity. We make it a bit less than full capacity to prevent attempts
1445 // to use full capacity. This is an effort to reduce routing failures, because in many cases
1446 // the channel might have been used to route very small values (either by honest users or as DoS).
1447 self.channel_value_satoshis * 1000 * 9 / 10,
1449 self.counterparty_max_htlc_value_in_flight_msat
1453 /// Allowed in any state (including after shutdown)
1454 pub fn get_counterparty_htlc_minimum_msat(&self) -> u64 {
1455 self.counterparty_htlc_minimum_msat
1458 /// Allowed in any state (including after shutdown), but will return `None` before TheirInitSent
1459 pub fn get_counterparty_htlc_maximum_msat(&self) -> Option<u64> {
1460 self.get_htlc_maximum_msat(self.counterparty_max_htlc_value_in_flight_msat)
1463 fn get_htlc_maximum_msat(&self, party_max_htlc_value_in_flight_msat: u64) -> Option<u64> {
1464 self.counterparty_selected_channel_reserve_satoshis.map(|counterparty_reserve| {
1465 let holder_reserve = self.holder_selected_channel_reserve_satoshis;
1467 (self.channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000,
1468 party_max_htlc_value_in_flight_msat
1473 pub fn get_value_satoshis(&self) -> u64 {
1474 self.channel_value_satoshis
1477 pub fn get_fee_proportional_millionths(&self) -> u32 {
1478 self.config.options.forwarding_fee_proportional_millionths
1481 pub fn get_cltv_expiry_delta(&self) -> u16 {
1482 cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
1485 pub fn get_max_dust_htlc_exposure_msat<F: Deref>(&self,
1486 fee_estimator: &LowerBoundedFeeEstimator<F>) -> u64
1487 where F::Target: FeeEstimator
1489 match self.config.options.max_dust_htlc_exposure {
1490 MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => {
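// Illustrative arithmetic with hypothetical numbers: if the estimator returns
// 2_500 sat/kWU and the configured multiplier is 5_000, the resulting dust exposure
// limit is 2_500 * 5_000 = 12_500_000 msat.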
1491 let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(
1492 ConfirmationTarget::OnChainSweep) as u64;
1493 feerate_per_kw.saturating_mul(multiplier)
1495 MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
1499 /// Returns the previous [`ChannelConfig`] applied to this channel, if any.
1500 pub fn prev_config(&self) -> Option<ChannelConfig> {
1501 self.prev_config.map(|prev_config| prev_config.0)
1504 // Checks whether we should emit a `ChannelPending` event.
1505 pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
1506 self.is_funding_broadcast() && !self.channel_pending_event_emitted
1509 // Returns whether we already emitted a `ChannelPending` event.
1510 pub(crate) fn channel_pending_event_emitted(&self) -> bool {
1511 self.channel_pending_event_emitted
1514 // Remembers that we already emitted a `ChannelPending` event.
1515 pub(crate) fn set_channel_pending_event_emitted(&mut self) {
1516 self.channel_pending_event_emitted = true;
1519 // Checks whether we should emit a `ChannelReady` event.
1520 pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool {
1521 self.is_usable() && !self.channel_ready_event_emitted
1524 // Remembers that we already emitted a `ChannelReady` event.
1525 pub(crate) fn set_channel_ready_event_emitted(&mut self) {
1526 self.channel_ready_event_emitted = true;
1529 /// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once
1530 /// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will
1531 /// no longer be considered when forwarding HTLCs.
1532 pub fn maybe_expire_prev_config(&mut self) {
1533 if self.prev_config.is_none() {
1536 let prev_config = self.prev_config.as_mut().unwrap();
1538 if prev_config.1 == EXPIRE_PREV_CONFIG_TICKS {
1539 self.prev_config = None;
1543 /// Returns the current [`ChannelConfig`] applied to the channel.
1544 pub fn config(&self) -> ChannelConfig {
1548 /// Updates the channel's config. Returns a bool indicating whether the applied config change
1549 /// requires us to generate a new `ChannelUpdate` message.
1550 pub fn update_config(&mut self, config: &ChannelConfig) -> bool {
1551 let did_channel_update =
1552 self.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths ||
1553 self.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat ||
1554 self.config.options.cltv_expiry_delta != config.cltv_expiry_delta;
1555 if did_channel_update {
1556 self.prev_config = Some((self.config.options, 0));
1557 // Update the counter, which backs the ChannelUpdate timestamp, to allow the relay
1558 // policy change to propagate throughout the network.
1559 self.update_time_counter += 1;
1561 self.config.options = *config;
1565 /// Returns true if funding_signed was sent/received and the
1566 /// funding transaction has been broadcast if necessary.
1567 pub fn is_funding_broadcast(&self) -> bool {
1568 !self.channel_state.is_pre_funded_state() &&
1569 !matches!(self.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH))
1572 /// Transaction nomenclature is somewhat confusing here as there are many different cases - a
1573 /// transaction is referred to as "a's transaction" implying that a will be able to broadcast
1574 /// the transaction. Thus, b will generally be sending a signature over such a transaction to
1575 /// a, and a can revoke the transaction by providing b the relevant per_commitment_secret. As
1576 /// such, a transaction is generally the result of b increasing the amount paid to a (or adding
1578 /// @local is used only to convert relevant internal structures which refer to remote vs local
1579 /// to decide value of outputs and direction of HTLCs.
1580 /// @generated_by_local is used to determine *which* HTLCs to include - noting that the HTLC
1581 /// state may indicate that one peer has informed the other that they'd like to add an HTLC but
1582 /// have not yet committed it. Such HTLCs will only be included in transactions which are being
1583 /// generated by the peer which proposed adding the HTLCs, and thus we need to understand both
1584 /// which peer generated this transaction and "to whom" this transaction flows.
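/// For example (an illustrative reading of the flags, not additional behavior): when building
/// the commitment transaction we are about to sign for our counterparty we would typically pass
/// `local = false` and `generated_by_local = true`, whereas when checking a `commitment_signed`
/// we received we build our own transaction with `local = true` and `generated_by_local = false`.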
1586 fn build_commitment_transaction<L: Deref>(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> CommitmentStats
1587 where L::Target: Logger
1589 let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new();
1590 let num_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len();
1591 let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs);
1593 let broadcaster_dust_limit_satoshis = if local { self.holder_dust_limit_satoshis } else { self.counterparty_dust_limit_satoshis };
1594 let mut remote_htlc_total_msat = 0;
1595 let mut local_htlc_total_msat = 0;
1596 let mut value_to_self_msat_offset = 0;
1598 let mut feerate_per_kw = self.feerate_per_kw;
1599 if let Some((feerate, update_state)) = self.pending_update_fee {
1600 if match update_state {
1601 // Note that these match the inclusion criteria when scanning
1602 // pending_inbound_htlcs below.
1603 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); !generated_by_local },
1604 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); !generated_by_local },
1605 FeeUpdateState::Outbound => { assert!(self.is_outbound()); generated_by_local },
1607 feerate_per_kw = feerate;
1611 log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...",
1612 commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number),
1613 get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()),
1615 if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
1617 macro_rules! get_htlc_in_commitment {
1618 ($htlc: expr, $offered: expr) => {
1619 HTLCOutputInCommitment {
1621 amount_msat: $htlc.amount_msat,
1622 cltv_expiry: $htlc.cltv_expiry,
1623 payment_hash: $htlc.payment_hash,
1624 transaction_output_index: None
1629 macro_rules! add_htlc_output {
1630 ($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => {
1631 if $outbound == local { // "offered HTLC output"
1632 let htlc_in_tx = get_htlc_in_commitment!($htlc, true);
1633 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1636 feerate_per_kw as u64 * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
1638 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1639 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1640 included_non_dust_htlcs.push((htlc_in_tx, $source));
1642 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1643 included_dust_htlcs.push((htlc_in_tx, $source));
1646 let htlc_in_tx = get_htlc_in_commitment!($htlc, false);
1647 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1650 feerate_per_kw as u64 * htlc_success_tx_weight(self.get_channel_type()) / 1000
1652 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1653 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1654 included_non_dust_htlcs.push((htlc_in_tx, $source));
1656 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1657 included_dust_htlcs.push((htlc_in_tx, $source));
1663 let mut inbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
1665 for ref htlc in self.pending_inbound_htlcs.iter() {
1666 let (include, state_name) = match htlc.state {
1667 InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"),
1668 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => (!generated_by_local, "AwaitingRemoteRevokeToAnnounce"),
1669 InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => (true, "AwaitingAnnouncedRemoteRevoke"),
1670 InboundHTLCState::Committed => (true, "Committed"),
1671 InboundHTLCState::LocalRemoved(_) => (!generated_by_local, "LocalRemoved"),
1675 add_htlc_output!(htlc, false, None, state_name);
1676 remote_htlc_total_msat += htlc.amount_msat;
1678 log_trace!(logger, " ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1680 &InboundHTLCState::LocalRemoved(ref reason) => {
1681 if generated_by_local {
1682 if let &InboundHTLCRemovalReason::Fulfill(preimage) = reason {
1683 inbound_htlc_preimages.push(preimage);
1684 value_to_self_msat_offset += htlc.amount_msat as i64;
1694 let mut outbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
1696 for ref htlc in self.pending_outbound_htlcs.iter() {
1697 let (include, state_name) = match htlc.state {
1698 OutboundHTLCState::LocalAnnounced(_) => (generated_by_local, "LocalAnnounced"),
1699 OutboundHTLCState::Committed => (true, "Committed"),
1700 OutboundHTLCState::RemoteRemoved(_) => (generated_by_local, "RemoteRemoved"),
1701 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) => (generated_by_local, "AwaitingRemoteRevokeToRemove"),
1702 OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) => (false, "AwaitingRemovedRemoteRevoke"),
1705 let preimage_opt = match htlc.state {
1706 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(p)) => p,
1707 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(p)) => p,
1708 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(p)) => p,
1712 if let Some(preimage) = preimage_opt {
1713 outbound_htlc_preimages.push(preimage);
1717 add_htlc_output!(htlc, true, Some(&htlc.source), state_name);
1718 local_htlc_total_msat += htlc.amount_msat;
1720 log_trace!(logger, " ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1722 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_))|OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => {
1723 value_to_self_msat_offset -= htlc.amount_msat as i64;
1725 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(_)) => {
1726 if !generated_by_local {
1727 value_to_self_msat_offset -= htlc.amount_msat as i64;
1735 let mut value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
1736 assert!(value_to_self_msat >= 0);
1737 // Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie
1738 // AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
1739 // "violate" their reserve value by couting those against it. Thus, we have to convert
1740 // everything to i64 before subtracting as otherwise we can overflow.
1741 let mut value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
1742 assert!(value_to_remote_msat >= 0);
1744 #[cfg(debug_assertions)]
1746 // Make sure that the to_self/to_remote is always either past the appropriate
1747 // channel_reserve *or* it is making progress towards it.
1748 let mut broadcaster_max_commitment_tx_output = if generated_by_local {
1749 self.holder_max_commitment_tx_output.lock().unwrap()
1751 self.counterparty_max_commitment_tx_output.lock().unwrap()
1753 debug_assert!(broadcaster_max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= self.counterparty_selected_channel_reserve_satoshis.unwrap() as i64);
1754 broadcaster_max_commitment_tx_output.0 = cmp::max(broadcaster_max_commitment_tx_output.0, value_to_self_msat as u64);
1755 debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= self.holder_selected_channel_reserve_satoshis as i64);
1756 broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64);
1759 let total_fee_sat = commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), &self.channel_transaction_parameters.channel_type_features);
1760 let anchors_val = if self.channel_transaction_parameters.channel_type_features.supports_anchors_zero_fee_htlc_tx() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64;
1761 let (value_to_self, value_to_remote) = if self.is_outbound() {
1762 (value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000)
1764 (value_to_self_msat / 1000, value_to_remote_msat / 1000 - anchors_val - total_fee_sat as i64)
1767 let mut value_to_a = if local { value_to_self } else { value_to_remote };
1768 let mut value_to_b = if local { value_to_remote } else { value_to_self };
1769 let (funding_pubkey_a, funding_pubkey_b) = if local {
1770 (self.get_holder_pubkeys().funding_pubkey, self.get_counterparty_pubkeys().funding_pubkey)
1772 (self.get_counterparty_pubkeys().funding_pubkey, self.get_holder_pubkeys().funding_pubkey)
1775 if value_to_a >= (broadcaster_dust_limit_satoshis as i64) {
1776 log_trace!(logger, " ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a);
1781 if value_to_b >= (broadcaster_dust_limit_satoshis as i64) {
1782 log_trace!(logger, " ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b);
1787 let num_nondust_htlcs = included_non_dust_htlcs.len();
1789 let channel_parameters =
1790 if local { self.channel_transaction_parameters.as_holder_broadcastable() }
1791 else { self.channel_transaction_parameters.as_counterparty_broadcastable() };
1792 let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number,
1799 &mut included_non_dust_htlcs,
1802 let mut htlcs_included = included_non_dust_htlcs;
1803 // The unwrap is safe, because all non-dust HTLCs have been assigned an output index
1804 htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
1805 htlcs_included.append(&mut included_dust_htlcs);
1807 // For the stats, trim the values to 0 msat accordingly
1808 value_to_self_msat = if (value_to_self_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_self_msat };
1809 value_to_remote_msat = if (value_to_remote_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_remote_msat };
1817 local_balance_msat: value_to_self_msat as u64,
1818 remote_balance_msat: value_to_remote_msat as u64,
1819 inbound_htlc_preimages,
1820 outbound_htlc_preimages,
1825 /// Creates a set of keys for build_commitment_transaction to generate a transaction which our
1826 /// counterparty will sign (ie DO NOT send signatures over a transaction created by this to
1827 /// our counterparty!)
1828 /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
1829 /// TODO Some magic rust shit to compile-time check this?
1830 fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
1831 let per_commitment_point = self.holder_signer.as_ref().get_per_commitment_point(commitment_number, &self.secp_ctx);
1832 let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
1833 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1834 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1836 TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint)
1840 /// Creates a set of keys for build_commitment_transaction to generate a transaction which we
1841 /// will sign and send to our counterparty.
1842 /// If an Err is returned, it is a ChannelError::Close (for get_funding_created)
1843 fn build_remote_transaction_keys(&self) -> TxCreationKeys {
1844 //TODO: Ensure that the payment_key derived here ends up in the library users' wallet as we
1845 //may see payments to it!
1846 let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
1847 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1848 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1850 TxCreationKeys::derive_new(&self.secp_ctx, &self.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint)
1853 /// Gets the redeemscript for the funding transaction output (ie the funding transaction output
1854 /// pays to get_funding_redeemscript().to_v0_p2wsh()).
1855 /// Panics if called before accept_channel/InboundV1Channel::new
1856 pub fn get_funding_redeemscript(&self) -> ScriptBuf {
1857 make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
1860 fn counterparty_funding_pubkey(&self) -> &PublicKey {
1861 &self.get_counterparty_pubkeys().funding_pubkey
1864 pub fn get_feerate_sat_per_1000_weight(&self) -> u32 {
1868 pub fn get_dust_buffer_feerate(&self, outbound_feerate_update: Option<u32>) -> u32 {
1869 // When calculating our exposure to dust HTLCs, we assume that the channel feerate
1870 // may, at any point, increase by at least 10 sat/vB (i.e. 2530 sat/kWU) or 25%,
1871 // whichever is higher. This ensures that we aren't suddenly exposed to significantly
1872 // more dust balance if the feerate increases when we have several HTLCs pending
1873 // which are near the dust limit.
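// Illustrative arithmetic with hypothetical feerates: at a current 2_000 sat/kWU the buffer
// feerate below is max(2530, 2_000 * 1250 / 1000) = 2_530 sat/kWU, while at 10_000 sat/kWU
// it is max(2530, 12_500) = 12_500 sat/kWU.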
1874 let mut feerate_per_kw = self.feerate_per_kw;
1875 // If there's a pending update fee, use it to ensure we aren't under-estimating
1876 // potential feerate updates coming soon.
1877 if let Some((feerate, _)) = self.pending_update_fee {
1878 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1880 if let Some(feerate) = outbound_feerate_update {
1881 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1883 cmp::max(2530, feerate_per_kw * 1250 / 1000)
1886 /// Get forwarding information for the counterparty.
1887 pub fn counterparty_forwarding_info(&self) -> Option<CounterpartyForwardingInfo> {
1888 self.counterparty_forwarding_info.clone()
1891 /// Returns an HTLCStats about inbound pending HTLCs
1892 fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1894 let mut stats = HTLCStats {
1895 pending_htlcs: context.pending_inbound_htlcs.len() as u32,
1896 pending_htlcs_value_msat: 0,
1897 on_counterparty_tx_dust_exposure_msat: 0,
1898 on_holder_tx_dust_exposure_msat: 0,
1899 holding_cell_msat: 0,
1900 on_holder_tx_holding_cell_htlcs_count: 0,
1903 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1906 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1907 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1908 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1910 let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
1911 let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
1912 for ref htlc in context.pending_inbound_htlcs.iter() {
1913 stats.pending_htlcs_value_msat += htlc.amount_msat;
1914 if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
1915 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1917 if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
1918 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1924 /// Returns an HTLCStats about pending outbound HTLCs, *including* pending adds in our holding cell.
1925 fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1927 let mut stats = HTLCStats {
1928 pending_htlcs: context.pending_outbound_htlcs.len() as u32,
1929 pending_htlcs_value_msat: 0,
1930 on_counterparty_tx_dust_exposure_msat: 0,
1931 on_holder_tx_dust_exposure_msat: 0,
1932 holding_cell_msat: 0,
1933 on_holder_tx_holding_cell_htlcs_count: 0,
1936 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1939 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1940 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1941 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1943 let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
1944 let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
1945 for ref htlc in context.pending_outbound_htlcs.iter() {
1946 stats.pending_htlcs_value_msat += htlc.amount_msat;
1947 if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
1948 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1950 if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
1951 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1955 for update in context.holding_cell_htlc_updates.iter() {
1956 if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
1957 stats.pending_htlcs += 1;
1958 stats.pending_htlcs_value_msat += amount_msat;
1959 stats.holding_cell_msat += amount_msat;
1960 if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
1961 stats.on_counterparty_tx_dust_exposure_msat += amount_msat;
1963 if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
1964 stats.on_holder_tx_dust_exposure_msat += amount_msat;
1966 stats.on_holder_tx_holding_cell_htlcs_count += 1;
1973 /// Get the available balances, see [`AvailableBalances`]'s fields for more info.
1974 /// Doesn't bother handling the
1975 /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
1976 /// corner case properly.
1977 pub fn get_available_balances<F: Deref>(&self, fee_estimator: &LowerBoundedFeeEstimator<F>)
1978 -> AvailableBalances
1979 where F::Target: FeeEstimator
1981 let context = &self;
1982 // Note that we have to handle overflow due to the above case.
1983 let inbound_stats = context.get_inbound_pending_htlc_stats(None);
1984 let outbound_stats = context.get_outbound_pending_htlc_stats(None);
1986 let mut balance_msat = context.value_to_self_msat;
1987 for ref htlc in context.pending_inbound_htlcs.iter() {
1988 if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
1989 balance_msat += htlc.amount_msat;
1992 balance_msat -= outbound_stats.pending_htlcs_value_msat;
1994 let outbound_capacity_msat = context.value_to_self_msat
1995 .saturating_sub(outbound_stats.pending_htlcs_value_msat)
1997 context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);
1999 let mut available_capacity_msat = outbound_capacity_msat;
2001 let anchor_outputs_value_msat = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2002 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
2006 if context.is_outbound() {
2007 // We should mind channel commit tx fee when computing how much of the available capacity
2008 // can be used in the next htlc. Mirrors the logic in send_htlc.
2010 // The fee depends on whether the amount we will be sending is above dust or not,
2011 // and the answer will in turn change the amount itself, making it a circular
2013 // This complicates the computation around dust-values, up to the one-htlc-value.
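// Illustrative arithmetic with hypothetical numbers: with a 354 sat holder dust limit, a
// 2_500 sat/kWU feerate, and an assumed HTLC-timeout transaction weight of ~663 WU, the
// effective dust threshold computed below is roughly 354 + 2_500 * 663 / 1000 = 2_011 sat.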
2014 let mut real_dust_limit_timeout_sat = context.holder_dust_limit_satoshis;
2015 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2016 real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000;
2019 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
2020 let mut max_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
2021 let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered);
2022 let mut min_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_dust, Some(()));
2023 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2024 max_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
2025 min_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
2028 // We will first subtract the fee as if we were above-dust. Then, if the resulting
2029 // value ends up being below dust, we have this fee available again. In that case,
2030 // match the value to right-below-dust.
2031 let mut capacity_minus_commitment_fee_msat: i64 = available_capacity_msat as i64 -
2032 max_reserved_commit_tx_fee_msat as i64 - anchor_outputs_value_msat as i64;
2033 if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 {
2034 let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
2035 debug_assert!(one_htlc_difference_msat != 0);
2036 capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64;
2037 capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat);
2038 available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64;
2040 available_capacity_msat = capacity_minus_commitment_fee_msat as u64;
2043 // If the channel is inbound (i.e. counterparty pays the fee), we need to make sure
2044 // sending a new HTLC won't reduce their balance below our reserve threshold.
2045 let mut real_dust_limit_success_sat = context.counterparty_dust_limit_satoshis;
2046 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2047 real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000;
2050 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered);
2051 let max_reserved_commit_tx_fee_msat = context.next_remote_commit_tx_fee_msat(htlc_above_dust, None);
2053 let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000;
2054 let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat)
2055 .saturating_sub(inbound_stats.pending_htlcs_value_msat);
2057 if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat + anchor_outputs_value_msat {
2058 // If another HTLC's fee would reduce the remote's balance below the reserve limit
2059 // we've selected for them, we can only send dust HTLCs.
2060 available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1);
2064 let mut next_outbound_htlc_minimum_msat = context.counterparty_htlc_minimum_msat;
2066 // If we get close to our maximum dust exposure, we end up in a situation where we can send
2067 // between zero and the remaining dust exposure limit remaining OR above the dust limit.
2068 // Because we cannot express this as a simple min/max, we prefer to tell the user they can
2069 // send above the dust limit (as the router can always overpay to meet the dust limit).
2070 let mut remaining_msat_below_dust_exposure_limit = None;
2071 let mut dust_exposure_dust_limit_msat = 0;
2072 let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(fee_estimator);
2074 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2075 (context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
2077 let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64;
2078 (context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2079 context.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2081 let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
2082 if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
2083 remaining_msat_below_dust_exposure_limit =
2084 Some(max_dust_htlc_exposure_msat.saturating_sub(on_counterparty_dust_htlc_exposure_msat));
2085 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
2088 let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
2089 if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
2090 remaining_msat_below_dust_exposure_limit = Some(cmp::min(
2091 remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
2092 max_dust_htlc_exposure_msat.saturating_sub(on_holder_dust_htlc_exposure_msat)));
2093 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
2096 if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit {
2097 if available_capacity_msat < dust_exposure_dust_limit_msat {
2098 available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat);
2100 next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat);
2104 available_capacity_msat = cmp::min(available_capacity_msat,
2105 context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat);
2107 if outbound_stats.pending_htlcs + 1 > context.counterparty_max_accepted_htlcs as u32 {
2108 available_capacity_msat = 0;
2112 inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000
2113 - context.value_to_self_msat as i64
2114 - context.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
2115 - context.holder_selected_channel_reserve_satoshis as i64 * 1000,
2117 outbound_capacity_msat,
2118 next_outbound_htlc_limit_msat: available_capacity_msat,
2119 next_outbound_htlc_minimum_msat,
2124 pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
2125 let context = &self;
2126 (context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis)
2129 /// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the
2130 /// number of pending HTLCs that are on track to be in our next commitment tx.
2132 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
2133 /// `fee_spike_buffer_htlc` is `Some`.
2135 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
2136 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
2138 /// Dust HTLCs are excluded.
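/// # Example
///
/// A usage sketch mirroring `get_available_balances` above (the bindings are hypothetical
/// local variables, not new API): to reserve fee for one more non-dust HTLC plus a fee spike
/// buffer, a caller passes a candidate right at the effective dust limit and `Some(())`:
///
/// ```ignore
/// let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
/// let max_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
/// ```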
2139 fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
2140 let context = &self;
2141 assert!(context.is_outbound());
2143 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2146 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2147 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2149 let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
2150 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
2152 let mut addl_htlcs = 0;
2153 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
2155 HTLCInitiator::LocalOffered => {
2156 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
2160 HTLCInitiator::RemoteOffered => {
2161 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
2167 let mut included_htlcs = 0;
2168 for ref htlc in context.pending_inbound_htlcs.iter() {
2169 if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
2172 // We include LocalRemoved HTLCs here because we may still need to broadcast a commitment
2173 // transaction including this HTLC if it times out before they RAA.
2174 included_htlcs += 1;
2177 for ref htlc in context.pending_outbound_htlcs.iter() {
2178 if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
2182 OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1,
2183 OutboundHTLCState::Committed => included_htlcs += 1,
2184 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
2185 // We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment
2186 // transaction won't be generated until they send us their next RAA, which will mean
2187 // dropping any HTLCs in this state.
2192 for htlc in context.holding_cell_htlc_updates.iter() {
2194 &HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => {
2195 if amount_msat / 1000 < real_dust_limit_timeout_sat {
2200 _ => {}, // Don't include claims/fails that are awaiting ack, because once we get the
2201 // ack we're guaranteed to never include them in commitment txs anymore.
2205 let num_htlcs = included_htlcs + addl_htlcs;
2206 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
2207 #[cfg(any(test, fuzzing))]
2210 if fee_spike_buffer_htlc.is_some() {
2211 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
2213 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len()
2214 + context.holding_cell_htlc_updates.len();
2215 let commitment_tx_info = CommitmentTxInfoCached {
2217 total_pending_htlcs,
2218 next_holder_htlc_id: match htlc.origin {
2219 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
2220 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
2222 next_counterparty_htlc_id: match htlc.origin {
2223 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
2224 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
2226 feerate: context.feerate_per_kw,
2228 *context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
2233 /// Get the commitment tx fee for the remote's next commitment transaction based on the number of
2234 /// pending HTLCs that are on track to be in their next commitment tx
2236 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
2237 /// `fee_spike_buffer_htlc` is `Some`.
2239 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
2240 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
2242 /// Dust HTLCs are excluded.
2243 fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
2244 let context = &self;
2245 assert!(!context.is_outbound());
2247 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2250 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2251 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2253 let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
2254 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
2256 let mut addl_htlcs = 0;
2257 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
2259 HTLCInitiator::LocalOffered => {
2260 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
2264 HTLCInitiator::RemoteOffered => {
2265 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
2271 // When calculating the set of HTLCs which will be included in their next commitment_signed, all
2272 // non-dust inbound HTLCs are included (as all states imply it will be included) and only
2273 // committed outbound HTLCs, see below.
2274 let mut included_htlcs = 0;
2275 for ref htlc in context.pending_inbound_htlcs.iter() {
2276 if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat {
2279 included_htlcs += 1;
2282 for ref htlc in context.pending_outbound_htlcs.iter() {
2283 if htlc.amount_msat / 1000 <= real_dust_limit_success_sat {
2286 // We only include outbound HTLCs if it will not be included in their next commitment_signed,
2287 // i.e. if they've responded to us with an RAA after announcement.
2289 OutboundHTLCState::Committed => included_htlcs += 1,
2290 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
2291 OutboundHTLCState::LocalAnnounced { .. } => included_htlcs += 1,
2296 let num_htlcs = included_htlcs + addl_htlcs;
2297 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
2298 #[cfg(any(test, fuzzing))]
2301 if fee_spike_buffer_htlc.is_some() {
2302 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
2304 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len();
2305 let commitment_tx_info = CommitmentTxInfoCached {
2307 total_pending_htlcs,
2308 next_holder_htlc_id: match htlc.origin {
2309 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
2310 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
2312 next_counterparty_htlc_id: match htlc.origin {
2313 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
2314 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
2316 feerate: context.feerate_per_kw,
2318 *context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
2323 fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O> where F: Fn() -> Option<O> {
2324 match self.channel_state {
2325 ChannelState::FundingNegotiated => f(),
2326 ChannelState::AwaitingChannelReady(flags) =>
2327 if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) ||
2328 flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into())
2338 /// Returns the transaction if there is a pending funding transaction that is yet to be broadcast.
2340 pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
2341 self.if_unbroadcasted_funding(|| self.funding_transaction.clone())
2344 /// Returns the transaction ID if there is a pending funding transaction that is yet to be broadcast.
2346 pub fn unbroadcasted_funding_txid(&self) -> Option<Txid> {
2347 self.if_unbroadcasted_funding(||
2348 self.channel_transaction_parameters.funding_outpoint.map(|txo| txo.txid)
2352 /// Returns whether the channel is funded in a batch.
2353 pub fn is_batch_funding(&self) -> bool {
2354 self.is_batch_funding.is_some()
2357 /// Returns the transaction ID if there is a pending batch funding transaction that is yet to be broadcast.
2359 pub fn unbroadcasted_batch_funding_txid(&self) -> Option<Txid> {
2360 self.unbroadcasted_funding_txid().filter(|_| self.is_batch_funding())
2363 /// Gets the latest commitment transaction and any dependent transactions for relay (forcing
2364 /// shutdown of this channel - no more calls into this Channel may be made afterwards except
2365 /// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
2366 /// Also returns the list of payment_hashes for channels which we can safely fail backwards
2367 /// immediately (others we will have to allow to time out).
2368 pub fn force_shutdown(&mut self, should_broadcast: bool, closure_reason: ClosureReason) -> ShutdownResult {
2369 // Note that we MUST only generate a monitor update that indicates force-closure - we're
2370 // called during initialization prior to the chain_monitor in the encompassing ChannelManager
2371 // being fully configured in some cases. Thus, it's likely any monitor events we generate will
2372 // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
2373 assert!(!matches!(self.channel_state, ChannelState::ShutdownComplete));
2375 // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
2376 // return them to fail the payment.
2377 let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
2378 let counterparty_node_id = self.get_counterparty_node_id();
2379 for htlc_update in self.holding_cell_htlc_updates.drain(..) {
2381 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
2382 dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
2387 let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
2388 // If we haven't yet exchanged funding signatures (ie channel_state < AwaitingChannelReady),
2389 // returning a channel monitor update here would imply a channel monitor update before
2390 // we even registered the channel monitor to begin with, which is invalid.
2391 // Thus, if we aren't actually at a point where we could conceivably broadcast the
2392 // funding transaction, don't return a funding txo (which prevents providing the
2393 // monitor update to the user, even if we return one).
2394 // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
2395 let generate_monitor_update = match self.channel_state {
2396 ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)|ChannelState::ShutdownComplete => true,
2399 if generate_monitor_update {
2400 self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
2401 Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
2402 update_id: self.latest_monitor_update_id,
2403 counterparty_node_id: Some(self.counterparty_node_id),
2404 updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
2408 let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();
2409 let unbroadcasted_funding_tx = self.unbroadcasted_funding();
2411 self.channel_state = ChannelState::ShutdownComplete;
2412 self.update_time_counter += 1;
2416 dropped_outbound_htlcs,
2417 unbroadcasted_batch_funding_txid,
2418 channel_id: self.channel_id,
2419 user_channel_id: self.user_id,
2420 channel_capacity_satoshis: self.channel_value_satoshis,
2421 counterparty_node_id: self.counterparty_node_id,
2422 unbroadcasted_funding_tx,
2426 /// Only allowed after [`Self::channel_transaction_parameters`] is set.
2427 fn get_funding_signed_msg<L: Deref>(&mut self, logger: &L) -> (CommitmentTransaction, Option<msgs::FundingSigned>) where L::Target: Logger {
2428 let counterparty_keys = self.build_remote_transaction_keys();
2429 let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number + 1, &counterparty_keys, false, false, logger).tx;
2431 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
2432 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
2433 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
2434 &self.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
2436 match &self.holder_signer {
2437 // TODO (arik): move match into calling method for Taproot
2438 ChannelSignerType::Ecdsa(ecdsa) => {
2439 let funding_signed = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.secp_ctx)
2440 .map(|(signature, _)| msgs::FundingSigned {
2441 channel_id: self.channel_id(),
2444 partial_signature_with_nonce: None,
2448 if funding_signed.is_none() {
2449 #[cfg(not(async_signing))] {
2450 panic!("Failed to get signature for funding_signed");
2452 #[cfg(async_signing)] {
2453 log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
2454 self.signer_pending_funding = true;
2456 } else if self.signer_pending_funding {
2457 log_trace!(logger, "Counterparty commitment signature available for funding_signed message; clearing signer_pending_funding");
2458 self.signer_pending_funding = false;
2461 // We sign "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
2462 (counterparty_initial_commitment_tx, funding_signed)
2464 // TODO (taproot|arik)
2471 // Internal utility functions for channels
2473 /// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
2474 /// `channel_value_satoshis` in msat, set through
2475 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
2477 /// The effective percentage is lower bounded by 1% and upper bounded by 100%.
2479 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
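///
/// # Example
///
/// Illustrative arithmetic with hypothetical numbers: a 1_000_000 sat channel configured at 10%
/// is capped at `1_000_000 * 10 * 10 = 100_000_000` msat, i.e. 10% of the channel value in msat:
///
/// ```ignore
/// let channel_value_satoshis = 1_000_000_u64;
/// let configured_percent = 10_u64;
/// assert_eq!(channel_value_satoshis * 10 * configured_percent, 100_000_000);
/// ```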
2480 fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
2481 let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
2483 } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
2486 config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
2488 channel_value_satoshis * 10 * configured_percent
2491 /// Returns a minimum channel reserve value the remote needs to maintain,
2492 /// required by us according to the configured or default
2493 /// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
2495 /// Guaranteed to return a value no larger than channel_value_satoshis
2497 /// This is used both for outbound and inbound channels and has lower bound
2498 /// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
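///
/// # Example
///
/// Illustrative arithmetic with hypothetical numbers: a 1_000_000 sat channel with
/// `their_channel_reserve_proportional_millionths` set to 10_000 (i.e. 1%) yields
/// `1_000_000 * 10_000 / 1_000_000 = 10_000` sat, which is then clamped to at least
/// `MIN_THEIR_CHAN_RESERVE_SATOSHIS` and at most the channel value.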
2499 pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
2500 let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
2501 cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
2504 /// This is for legacy reasons, present for forward-compatibility.
2505 /// LDK versions older than 0.0.104 don't know how to read/handle values other than the default
2506 /// from storage. Hence, we use this function to avoid persisting default values of
2507 /// `holder_selected_channel_reserve_satoshis` for channels to storage.
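///
/// For example, the legacy default is 1% of the channel value, floored at 1_000 sat and capped
/// at the channel value itself: a 1_000_000 sat channel gets a 10_000 sat reserve, while a
/// 50_000 sat channel gets the 1_000 sat floor.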
2508 pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
2509 let (q, _) = channel_value_satoshis.overflowing_div(100);
2510 cmp::min(channel_value_satoshis, cmp::max(q, 1000))
2513 // Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
2514 // Note that num_htlcs should not include dust HTLCs.
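// Illustrative arithmetic with hypothetical numbers: at 2_500 sat/kWU, assuming a non-anchor
// commitment base weight of 724 WU and 172 WU per non-dust HTLC, a commitment transaction with
// two HTLC outputs costs roughly 2_500 * (724 + 2 * 172) / 1000 = 2_670 sat.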
2516 fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2517 feerate_per_kw as u64 * (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
2520 // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs.
2521 // Note that num_htlcs should not include dust HTLCs.
2522 pub(crate) fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2523 // Note that we need to divide before multiplying to round properly,
2524 // since the lowest denomination of bitcoin on-chain is the satoshi.
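// Illustrative rounding with hypothetical numbers: at 253 sat/kWU and an assumed total weight
// of 724 + 172 = 896 WU, this yields (896 * 253 / 1000) * 1000 = 226_000 msat rather than the
// unrounded 226_688 msat.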
2525 (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
2528 // Holder designates channel data owned for the benefit of the user client.
2529 // Counterparty designates channel data owned by the other channel participant entity.
2530 pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
2531 pub context: ChannelContext<SP>,
2534 #[cfg(any(test, fuzzing))]
2535 struct CommitmentTxInfoCached {
2537 total_pending_htlcs: usize,
2538 next_holder_htlc_id: u64,
2539 next_counterparty_htlc_id: u64,
2543 /// Contents of a wire message that fails an HTLC backwards. Useful for [`Channel::fail_htlc`] to
2544 /// fail with either [`msgs::UpdateFailMalformedHTLC`] or [`msgs::UpdateFailHTLC`] as needed.
2545 trait FailHTLCContents {
2546 type Message: FailHTLCMessageName;
2547 fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message;
2548 fn to_inbound_htlc_state(self) -> InboundHTLCState;
2549 fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK;
2551 impl FailHTLCContents for msgs::OnionErrorPacket {
2552 type Message = msgs::UpdateFailHTLC;
2553 fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
2554 msgs::UpdateFailHTLC { htlc_id, channel_id, reason: self }
2556 fn to_inbound_htlc_state(self) -> InboundHTLCState {
2557 InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(self))
2559 fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
2560 HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet: self }
2563 impl FailHTLCContents for (u16, [u8; 32]) {
2564 type Message = msgs::UpdateFailMalformedHTLC; // (failure_code, sha256_of_onion)
2565 fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
2566 msgs::UpdateFailMalformedHTLC {
2569 failure_code: self.0,
2570 sha256_of_onion: self.1
2573 fn to_inbound_htlc_state(self) -> InboundHTLCState {
2574 InboundHTLCState::LocalRemoved(
2575 InboundHTLCRemovalReason::FailMalformed((self.1, self.0))
2578 fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
2579 HTLCUpdateAwaitingACK::FailMalformedHTLC {
2581 failure_code: self.0,
2582 sha256_of_onion: self.1
2587 trait FailHTLCMessageName {
2588 fn name() -> &'static str;
2590 impl FailHTLCMessageName for msgs::UpdateFailHTLC {
2591 fn name() -> &'static str {
2595 impl FailHTLCMessageName for msgs::UpdateFailMalformedHTLC {
2596 fn name() -> &'static str {
2597 "update_fail_malformed_htlc"
2601 impl<SP: Deref> Channel<SP> where
2602 SP::Target: SignerProvider,
2603 <SP::Target as SignerProvider>::EcdsaSigner: WriteableEcdsaChannelSigner
2605 fn check_remote_fee<F: Deref, L: Deref>(
2606 channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator<F>,
2607 feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L
2608 ) -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
2610 let lower_limit_conf_target = if channel_type.supports_anchors_zero_fee_htlc_tx() {
2611 ConfirmationTarget::MinAllowedAnchorChannelRemoteFee
2613 ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee
2615 let lower_limit = fee_estimator.bounded_sat_per_1000_weight(lower_limit_conf_target);
2616 if feerate_per_kw < lower_limit {
2617 if let Some(cur_feerate) = cur_feerate_per_kw {
2618 if feerate_per_kw > cur_feerate {
2620 "Accepting feerate that may prevent us from closing this channel because it's higher than what we have now. Had {} s/kW, now {} s/kW.",
2621 cur_feerate, feerate_per_kw);
2625 return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit)));
2631 fn get_closing_scriptpubkey(&self) -> ScriptBuf {
2632 // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
2633 // is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
2634 // outside of those situations will panic.
2635 self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
2639 fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
2644 1 + // script length (0)
2648 )*4 + // * 4 for non-witness parts
2649 2 + // witness marker and flag
2650 1 + // witness element count
2651 4 + // 4 element lengths (2 sigs, multisig dummy, and witness script)
2652 self.context.get_funding_redeemscript().len() as u64 + // funding witness script
2653 2*(1 + 71); // two signatures + sighash type flags
2654 if let Some(spk) = a_scriptpubkey {
2655 ret += ((8+1) + // output values and script length
2656 spk.len() as u64) * 4; // scriptpubkey and witness multiplier
2658 if let Some(spk) = b_scriptpubkey {
2659 ret += ((8+1) + // output values and script length
2660 spk.len() as u64) * 4; // scriptpubkey and witness multiplier
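// Illustrative: a P2WPKH shutdown script is 22 bytes, so each output that is present adds
// ((8 + 1) + 22) * 4 = 124 weight units to the estimate.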
2666 fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
2667 assert!(self.context.pending_inbound_htlcs.is_empty());
2668 assert!(self.context.pending_outbound_htlcs.is_empty());
2669 assert!(self.context.pending_update_fee.is_none());
2671 let mut total_fee_satoshis = proposed_total_fee_satoshis;
2672 let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 };
2673 let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 };
2675 if value_to_holder < 0 {
2676 assert!(self.context.is_outbound());
2677 total_fee_satoshis += (-value_to_holder) as u64;
2678 } else if value_to_counterparty < 0 {
2679 assert!(!self.context.is_outbound());
2680 total_fee_satoshis += (-value_to_counterparty) as u64;
2683 if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis {
2684 value_to_counterparty = 0;
2687 if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis {
2688 value_to_holder = 0;
2691 assert!(self.context.shutdown_scriptpubkey.is_some());
2692 let holder_shutdown_script = self.get_closing_scriptpubkey();
2693 let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap();
2694 let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();
2696 let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
2697 (closing_transaction, total_fee_satoshis)
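// Worked example (illustrative): in a 1_000_000 sat channel where we are the outbound funder
// with value_to_self_msat = 600_000_000 and a proposed fee of 1_000 sats, the holder output is
// 600_000 - 1_000 = 599_000 sats and the counterparty output is 400_000 sats (assuming both
// are above the dust limit and `skip_remote_output` is false).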
2700 fn funding_outpoint(&self) -> OutPoint {
2701 self.context.channel_transaction_parameters.funding_outpoint.unwrap()
2704 /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
2707 /// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
2708 /// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
2710 /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is
2712 pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
2713 (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
2714 where L::Target: Logger {
2715 // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
2716 // (see equivalent if condition there).
2717 assert!(self.context.channel_state.should_force_holding_cell());
2718 let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
2719 let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
2720 self.context.latest_monitor_update_id = mon_update_id;
2721 if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
2722 assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
2726 fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
2727 // Either ChannelReady got set (which means it won't be unset) or there is no way any
2728 // caller thought we could have something claimed (because we wouldn't have accepted an
2729 // incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
2731 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
2732 panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
2735 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
2736 // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
2737 // these, but for now we just have to treat them as normal.
2739 let mut pending_idx = core::usize::MAX;
2740 let mut htlc_value_msat = 0;
2741 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
2742 if htlc.htlc_id == htlc_id_arg {
2743 debug_assert_eq!(htlc.payment_hash, PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).to_byte_array()));
2744 log_debug!(logger, "Claiming inbound HTLC id {} with payment hash {} with preimage {}",
2745 htlc.htlc_id, htlc.payment_hash, payment_preimage_arg);
2747 InboundHTLCState::Committed => {},
2748 InboundHTLCState::LocalRemoved(ref reason) => {
2749 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
2751 log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, &self.context.channel_id());
2752 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
2754 return UpdateFulfillFetch::DuplicateClaim {};
2757 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2758 // Don't return in release mode here so that we can update channel_monitor
2762 htlc_value_msat = htlc.amount_msat;
2766 if pending_idx == core::usize::MAX {
2767 #[cfg(any(test, fuzzing))]
2768 // If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
2769 // this is simply a duplicate claim, not a previously-failed HTLC on which we lost funds.
2770 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2771 return UpdateFulfillFetch::DuplicateClaim {};
2774 // Now update local state:
2776 // We have to put the payment_preimage in the channel_monitor right away here to ensure we
2777 // can claim it even if the channel hits the chain before we see their next commitment.
2778 self.context.latest_monitor_update_id += 1;
2779 let monitor_update = ChannelMonitorUpdate {
2780 update_id: self.context.latest_monitor_update_id,
2781 counterparty_node_id: Some(self.context.counterparty_node_id),
2782 updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
2783 payment_preimage: payment_preimage_arg.clone(),
2787 if self.context.channel_state.should_force_holding_cell() {
2788 // Note that this condition is the same as the assertion in
2789 // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
2790 // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
2791 // do not get into this branch.
2792 for pending_update in self.context.holding_cell_htlc_updates.iter() {
2793 match pending_update {
2794 &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2795 if htlc_id_arg == htlc_id {
2796 // Make sure we don't leave latest_monitor_update_id incremented here:
2797 self.context.latest_monitor_update_id -= 1;
2798 #[cfg(any(test, fuzzing))]
2799 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2800 return UpdateFulfillFetch::DuplicateClaim {};
2803 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
2804 &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
2806 if htlc_id_arg == htlc_id {
2807 log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id());
2808 // TODO: We may actually be able to switch to a fulfill here, though it's
2809 // rare enough it may not be worth the complexity burden.
2810 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
2811 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2817 log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state.to_u32());
2818 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
2819 payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
2821 #[cfg(any(test, fuzzing))]
2822 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
2823 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2825 #[cfg(any(test, fuzzing))]
2826 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
2829 let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
2830 if let InboundHTLCState::Committed = htlc.state {
2832 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2833 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2835 log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, &self.context.channel_id);
2836 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
2839 UpdateFulfillFetch::NewClaim {
2842 msg: Some(msgs::UpdateFulfillHTLC {
2843 channel_id: self.context.channel_id(),
2844 htlc_id: htlc_id_arg,
2845 payment_preimage: payment_preimage_arg,
2850 pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
2851 let release_cs_monitor = self.context.blocked_monitor_updates.is_empty();
2852 match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
2853 UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
2854 // Even if we aren't supposed to let new monitor updates with commitment state
2855 // updates run, we still need to push the preimage ChannelMonitorUpdateStep no
2856 // matter what. Sadly, to push a new monitor update which flies before others
2857 // already queued, we have to insert it into the pending queue and update the
2858 // update_ids of all the following monitors.
2859 if release_cs_monitor && msg.is_some() {
2860 let mut additional_update = self.build_commitment_no_status_check(logger);
2861 // build_commitment_no_status_check may bump latest_monitor_update_id but we want them
2862 // to be strictly increasing by one, so decrement it here.
2863 self.context.latest_monitor_update_id = monitor_update.update_id;
2864 monitor_update.updates.append(&mut additional_update.updates);
2866 let new_mon_id = self.context.blocked_monitor_updates.get(0)
2867 .map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
2868 monitor_update.update_id = new_mon_id;
2869 for held_update in self.context.blocked_monitor_updates.iter_mut() {
2870 held_update.update.update_id += 1;
2873 debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
2874 let update = self.build_commitment_no_status_check(logger);
2875 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
2881 self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
2882 UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, }
2884 UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
2888 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2889 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2890 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2891 /// before we fail backwards.
2893 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2894 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2895 /// [`ChannelError::Ignore`].
2896 pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
2897 -> Result<(), ChannelError> where L::Target: Logger {
2898 self.fail_htlc(htlc_id_arg, err_packet, true, logger)
2899 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
2902 /// Used for failing back with [`msgs::UpdateFailMalformedHTLC`]. For now, this is used when we
2903 /// want to fail blinded HTLCs where we are not the intro node.
2905 /// See [`Self::queue_fail_htlc`] for more info.
2906 pub fn queue_fail_malformed_htlc<L: Deref>(
2907 &mut self, htlc_id_arg: u64, failure_code: u16, sha256_of_onion: [u8; 32], logger: &L
2908 ) -> Result<(), ChannelError> where L::Target: Logger {
2909 self.fail_htlc(htlc_id_arg, (failure_code, sha256_of_onion), true, logger)
2910 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
2913 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2914 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2915 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2916 /// before we fail backwards.
2918 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2919 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2920 /// [`ChannelError::Ignore`].
2921 fn fail_htlc<L: Deref, E: FailHTLCContents + Clone>(
2922 &mut self, htlc_id_arg: u64, err_packet: E, mut force_holding_cell: bool,
2924 ) -> Result<Option<E::Message>, ChannelError> where L::Target: Logger {
2925 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
2926 panic!("Was asked to fail an HTLC when channel was not in an operational state");
2929 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
2930 // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
2931 // these, but for now we just have to treat them as normal.
2933 let mut pending_idx = core::usize::MAX;
2934 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
2935 if htlc.htlc_id == htlc_id_arg {
2937 InboundHTLCState::Committed => {},
2938 InboundHTLCState::LocalRemoved(ref reason) => {
2939 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
2941 debug_assert!(false, "Tried to fail an HTLC that was already failed");
2946 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2947 return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
2953 if pending_idx == core::usize::MAX {
2954 #[cfg(any(test, fuzzing))]
2955 // If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
2956 // is simply a duplicate resolution, not a case where we failed back too early.
2957 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2961 if self.context.channel_state.should_force_holding_cell() {
2962 debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
2963 force_holding_cell = true;
2966 // Now update local state:
2967 if force_holding_cell {
2968 for pending_update in self.context.holding_cell_htlc_updates.iter() {
2969 match pending_update {
2970 &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2971 if htlc_id_arg == htlc_id {
2972 #[cfg(any(test, fuzzing))]
2973 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2977 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
2978 &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
2980 if htlc_id_arg == htlc_id {
2981 debug_assert!(false, "Tried to fail an HTLC that was already failed");
2982 return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
2988 log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id());
2989 self.context.holding_cell_htlc_updates.push(err_packet.to_htlc_update_awaiting_ack(htlc_id_arg));
2993 log_trace!(logger, "Failing HTLC ID {} back with {} message in channel {}.", htlc_id_arg,
2994 E::Message::name(), &self.context.channel_id());
2996 let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
2997 htlc.state = err_packet.clone().to_inbound_htlc_state();
3000 Ok(Some(err_packet.to_message(htlc_id_arg, self.context.channel_id())))
3003 // Message handlers:
3004 /// Updates the state of the channel to indicate that all channels in the batch have received
3005 /// funding_signed and persisted their monitors.
3006 /// The funding transaction is consequently allowed to be broadcast, and the channel can be
3007 /// treated as a non-batch channel going forward.
3008 pub fn set_batch_ready(&mut self) {
3009 self.context.is_batch_funding = None;
3010 self.context.channel_state.clear_waiting_for_batch();
3013 /// Unsets the existing funding information.
3015 /// This must only be used if the channel has not yet completed funding and has not been used.
3017 /// Further, the channel must be immediately shut down after this with a call to
3018 /// [`ChannelContext::force_shutdown`].
3019 pub fn unset_funding_info(&mut self, temporary_channel_id: ChannelId) {
3020 debug_assert!(matches!(
3021 self.context.channel_state, ChannelState::AwaitingChannelReady(_)
3023 self.context.channel_transaction_parameters.funding_outpoint = None;
3024 self.context.channel_id = temporary_channel_id;
3027 /// Handles a channel_ready message from our peer. If we've already sent our channel_ready
3028 /// and the channel is now usable (and public), this may generate an announcement_signatures to
3030 pub fn channel_ready<NS: Deref, L: Deref>(
3031 &mut self, msg: &msgs::ChannelReady, node_signer: &NS, chain_hash: ChainHash,
3032 user_config: &UserConfig, best_block: &BestBlock, logger: &L
3033 ) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
3035 NS::Target: NodeSigner,
3038 if self.context.channel_state.is_peer_disconnected() {
3039 self.context.workaround_lnd_bug_4006 = Some(msg.clone());
3040 return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
3043 if let Some(scid_alias) = msg.short_channel_id_alias {
3044 if Some(scid_alias) != self.context.short_channel_id {
3045 // The scid alias provided can be used to route payments *from* our counterparty,
3046 // i.e. can be used for inbound payments and provided in invoices, but is not used
3047 // when routing outbound payments.
3048 self.context.latest_inbound_scid_alias = Some(scid_alias);
3052 // Our channel_ready shouldn't have been sent if we are waiting for other channels in the
3053 // batch, but we can receive channel_ready messages.
3054 let mut check_reconnection = false;
3055 match &self.context.channel_state {
3056 ChannelState::AwaitingChannelReady(flags) => {
3057 let flags = *flags & !FundedStateFlags::ALL;
3058 debug_assert!(!flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY) || !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
3059 if flags & !AwaitingChannelReadyFlags::WAITING_FOR_BATCH == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY {
3060 // If we reconnected before sending our `channel_ready` they may still resend theirs.
3061 check_reconnection = true;
3062 } else if (flags & !AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty() {
3063 self.context.channel_state.set_their_channel_ready();
3064 } else if flags == AwaitingChannelReadyFlags::OUR_CHANNEL_READY {
3065 self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
3066 self.context.update_time_counter += 1;
3068 // We're in `WAITING_FOR_BATCH`, so we should wait until we're ready.
3069 debug_assert!(flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
3072 // If we reconnected before sending our `channel_ready` they may still resend theirs.
3073 ChannelState::ChannelReady(_) => check_reconnection = true,
3074 _ => return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned())),
3076 if check_reconnection {
3077 // They probably disconnected/reconnected and re-sent the channel_ready, which is
3078 // required, or they're sending a fresh SCID alias.
3079 let expected_point =
3080 if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
3081 // If they haven't ever sent an updated point, the point they send should match
3083 self.context.counterparty_cur_commitment_point
3084 } else if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
3085 // If we've advanced the commitment number once, the second commitment point is
3086 // at `counterparty_prev_commitment_point`, which is not yet revoked.
3087 debug_assert!(self.context.counterparty_prev_commitment_point.is_some());
3088 self.context.counterparty_prev_commitment_point
3090 // If they have sent updated points, channel_ready is always supposed to match
3091 // their "first" point, which we re-derive here.
3092 Some(PublicKey::from_secret_key(&self.context.secp_ctx, &SecretKey::from_slice(
3093 &self.context.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
3094 ).expect("We already advanced, so previous secret keys should have been validated already")))
3096 if expected_point != Some(msg.next_per_commitment_point) {
3097 return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
3102 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
3103 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
3105 log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id());
3107 Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger))
3110 pub fn update_add_htlc<F, FE: Deref, L: Deref>(
3111 &mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
3112 create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
3113 ) -> Result<(), ChannelError>
3114 where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
3115 FE::Target: FeeEstimator, L::Target: Logger,
3117 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3118 return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
3120 // We can't accept HTLCs sent after we've sent a shutdown.
3121 if self.context.channel_state.is_local_shutdown_sent() {
3122 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
3124 // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
3125 if self.context.channel_state.is_remote_shutdown_sent() {
3126 return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
3128 if self.context.channel_state.is_peer_disconnected() {
3129 return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
3131 if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
3132 return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
3134 if msg.amount_msat == 0 {
3135 return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
3137 if msg.amount_msat < self.context.holder_htlc_minimum_msat {
3138 return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
3141 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
3142 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
3143 if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
3144 return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
3146 if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
3147 return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
3150 // Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
3151 // the reserve_satoshis we told them to always have as direct payment so that they lose
3152 // something if we punish them for broadcasting an old state).
3153 // Note that we don't really care about having a small/no to_remote output in our local
3154 // commitment transactions, as the purpose of the channel reserve is to ensure we can
3155 // punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
3156 // present in the next commitment transaction we send them (at least for fulfilled ones,
3157 // failed ones won't modify value_to_self).
3158 // Note that we will send HTLCs which another instance of rust-lightning would think
3159 // violate the reserve value if we do not do this (as we forget inbound HTLCs from the
3160 // Channel state once they will not be present in the next received commitment
3162 let mut removed_outbound_total_msat = 0;
3163 for ref htlc in self.context.pending_outbound_htlcs.iter() {
3164 if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
3165 removed_outbound_total_msat += htlc.amount_msat;
3166 } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
3167 removed_outbound_total_msat += htlc.amount_msat;
3171 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
3172 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3175 let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
3176 (dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
3177 dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
3179 let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
3180 if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
3181 let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
3182 if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
3183 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
3184 on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
3185 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
3189 let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
3190 if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
3191 let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
3192 if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
3193 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
3194 on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
3195 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
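// Illustrative numbers for the two dust checks above, assuming non-anchor weights of 663 WU
// (HTLC-timeout) and 703 WU (HTLC-success): at a dust buffer feerate of 2_500 sat/kW and a
// counterparty dust limit of 546 sats, any HTLC below 2_500 * 663 / 1_000 + 546 = 2_203 sats
// counts towards the counterparty-transaction dust exposure checked against
// `max_dust_htlc_exposure_msat`.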
3199 let pending_value_to_self_msat =
3200 self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
3201 let pending_remote_value_msat =
3202 self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
3203 if pending_remote_value_msat < msg.amount_msat {
3204 return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
3207 // Check that the remote can afford to pay for this HTLC on-chain at the current
3208 // feerate_per_kw, while maintaining their channel reserve (as required by the spec).
3210 let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else {
3211 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3212 self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
3214 let anchor_outputs_value_msat = if !self.context.is_outbound() && self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3215 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
3219 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(anchor_outputs_value_msat) < remote_commit_tx_fee_msat {
3220 return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
3222 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(remote_commit_tx_fee_msat).saturating_sub(anchor_outputs_value_msat) < self.context.holder_selected_channel_reserve_satoshis * 1000 {
3223 return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
3227 let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3228 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
3232 if !self.context.is_outbound() {
3233 // `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
3234 // the spec because the fee spike buffer requirement doesn't exist on the receiver's
3235 // side, only on the sender's. Note that with anchor outputs we are no longer as
3236 // sensitive to fee spikes, so the extra fee spike buffer multiple below only applies to non-anchor channels.
3237 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3238 let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
3239 if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3240 remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
3242 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
3243 // Note that if the pending_forward_status is not updated here, then it's because we're already failing
3244 // the HTLC, i.e. its status is already set to failing.
3245 log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
3246 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
3249 // Check that they won't violate our local required channel reserve by adding this HTLC.
3250 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3251 let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
3252 if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat + anchor_outputs_value_msat {
3253 return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
3256 if self.context.next_counterparty_htlc_id != msg.htlc_id {
3257 return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
3259 if msg.cltv_expiry >= 500000000 {
3260 return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
3263 if self.context.channel_state.is_local_shutdown_sent() {
3264 if let PendingHTLCStatus::Forward(_) = pending_forward_status {
3265 panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
3269 // Now update local state:
3270 self.context.next_counterparty_htlc_id += 1;
3271 self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
3272 htlc_id: msg.htlc_id,
3273 amount_msat: msg.amount_msat,
3274 payment_hash: msg.payment_hash,
3275 cltv_expiry: msg.cltv_expiry,
3276 state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
3281 /// Marks an outbound HTLC which we have received update_fail/fulfill/malformed
3283 fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
3284 assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage");
3285 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3286 if htlc.htlc_id == htlc_id {
3287 let outcome = match check_preimage {
3288 None => fail_reason.into(),
3289 Some(payment_preimage) => {
3290 let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array());
3291 if payment_hash != htlc.payment_hash {
3292 return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
3294 OutboundHTLCOutcome::Success(Some(payment_preimage))
3298 OutboundHTLCState::LocalAnnounced(_) =>
3299 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
3300 OutboundHTLCState::Committed => {
3301 htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
3303 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
3304 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
3309 Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
3312 pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
3313 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3314 return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
3316 if self.context.channel_state.is_peer_disconnected() {
3317 return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
3320 self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
3323 pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
3324 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3325 return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
3327 if self.context.channel_state.is_peer_disconnected() {
3328 return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
3331 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
3335 pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
3336 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3337 return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
3339 if self.context.channel_state.is_peer_disconnected() {
3340 return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
3343 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
3347 pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
3348 where L::Target: Logger
3350 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3351 return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
3353 if self.context.channel_state.is_peer_disconnected() {
3354 return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
3356 if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
3357 return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
3360 let funding_script = self.context.get_funding_redeemscript();
3362 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
3364 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
3365 let commitment_txid = {
3366 let trusted_tx = commitment_stats.tx.trust();
3367 let bitcoin_tx = trusted_tx.built_transaction();
3368 let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
3370 log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
3371 log_bytes!(msg.signature.serialize_compact()[..]),
3372 log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
3373 log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), &self.context.channel_id());
3374 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
3375 return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
3379 let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();
3381 // If our counterparty updated the channel fee in this commitment transaction, check that
3382 // they can actually afford the new fee now.
3383 let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee {
3384 update_state == FeeUpdateState::RemoteAnnounced
3387 debug_assert!(!self.context.is_outbound());
3388 let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
3389 if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
3390 return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
3393 #[cfg(any(test, fuzzing))]
3395 if self.context.is_outbound() {
3396 let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
3397 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
3398 if let Some(info) = projected_commit_tx_info {
3399 let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len()
3400 + self.context.holding_cell_htlc_updates.len();
3401 if info.total_pending_htlcs == total_pending_htlcs
3402 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
3403 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
3404 && info.feerate == self.context.feerate_per_kw {
3405 assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);
3411 if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
3412 return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
3415 // Up to LDK 0.0.115, HTLC information was required to be duplicated in the
3416 // `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
3417 // in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
3418 // outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for
3419 // backwards compatibility, we never use it in production. To provide test coverage, here,
3420 // we randomly decide (in test/fuzzing builds) to use the new vec sometimes.
3421 #[allow(unused_assignments, unused_mut)]
3422 let mut separate_nondust_htlc_sources = false;
3423 #[cfg(all(feature = "std", any(test, fuzzing)))] {
3424 use core::hash::{BuildHasher, Hasher};
3425 // Get a random value using the only std API to do so - the DefaultHasher
3426 let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
3427 separate_nondust_htlc_sources = rand_val % 2 == 0;
3430 let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
3431 let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
3432 for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
3433 if let Some(_) = htlc.transaction_output_index {
3434 let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
3435 self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, &self.context.channel_type,
3436 &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
3438 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &keys);
3439 let htlc_sighashtype = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
3440 let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
3441 log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
3442 log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.to_public_key().serialize()),
3443 encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), &self.context.channel_id());
3444 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key.to_public_key()) {
3445 return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
3447 if !separate_nondust_htlc_sources {
3448 htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
3451 htlcs_and_sigs.push((htlc, None, source_opt.take()));
3453 if separate_nondust_htlc_sources {
3454 if let Some(source) = source_opt.take() {
3455 nondust_htlc_sources.push(source);
3458 debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
3461 let holder_commitment_tx = HolderCommitmentTransaction::new(
3462 commitment_stats.tx,
3464 msg.htlc_signatures.clone(),
3465 &self.context.get_holder_pubkeys().funding_pubkey,
3466 self.context.counterparty_funding_pubkey()
3469 self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.outbound_htlc_preimages)
3470 .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
3472 // Update state now that we've passed all the can-fail calls...
3473 let mut need_commitment = false;
3474 if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee {
3475 if *update_state == FeeUpdateState::RemoteAnnounced {
3476 *update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
3477 need_commitment = true;
3481 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
3482 let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
3483 Some(forward_info.clone())
3485 if let Some(forward_info) = new_forward {
3486 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
3487 &htlc.payment_hash, &self.context.channel_id);
3488 htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
3489 need_commitment = true;
3492 let mut claimed_htlcs = Vec::new();
3493 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3494 if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
3495 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
3496 &htlc.payment_hash, &self.context.channel_id);
3497 // Grab the preimage, if it exists, instead of cloning
3498 let mut reason = OutboundHTLCOutcome::Success(None);
3499 mem::swap(outcome, &mut reason);
3500 if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
3501 // If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
3502 // upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
3503 // have a `Success(None)` reason. In this case we could forget some HTLC
3504 // claims, but such an upgrade is unlikely and including claimed HTLCs here
3505 // fixes a bug which the user was exposed to on 0.0.104 when they started the
3507 claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
3509 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
3510 need_commitment = true;
3514 self.context.latest_monitor_update_id += 1;
3515 let mut monitor_update = ChannelMonitorUpdate {
3516 update_id: self.context.latest_monitor_update_id,
3517 counterparty_node_id: Some(self.context.counterparty_node_id),
3518 updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
3519 commitment_tx: holder_commitment_tx,
3520 htlc_outputs: htlcs_and_sigs,
3522 nondust_htlc_sources,
3526 self.context.cur_holder_commitment_transaction_number -= 1;
3527 self.context.expecting_peer_commitment_signed = false;
3528 // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
3529 // build_commitment_no_status_check() next which will reset this to RAAFirst.
3530 self.context.resend_order = RAACommitmentOrder::CommitmentFirst;
3532 if self.context.channel_state.is_monitor_update_in_progress() {
3533 // In case we initially failed monitor updating without requiring a response, we need
3534 // to make sure the RAA gets sent first.
3535 self.context.monitor_pending_revoke_and_ack = true;
3536 if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
3537 // If we were going to send a commitment_signed after the RAA, go ahead and do all
3538 // the corresponding HTLC status updates so that
3539 // get_last_commitment_update_for_send includes the right HTLCs.
3540 self.context.monitor_pending_commitment_signed = true;
3541 let mut additional_update = self.build_commitment_no_status_check(logger);
3542 // build_commitment_no_status_check may bump latest_monitor_update_id but we want them to be
3543 // strictly increasing by one, so decrement it here.
3544 self.context.latest_monitor_update_id = monitor_update.update_id;
3545 monitor_update.updates.append(&mut additional_update.updates);
3547 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
3548 &self.context.channel_id);
3549 return Ok(self.push_ret_blockable_mon_update(monitor_update));
3552 let need_commitment_signed = if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
3553 // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
3554 // we'll send one right away when we get the revoke_and_ack when we
3555 // free_holding_cell_htlcs().
3556 let mut additional_update = self.build_commitment_no_status_check(logger);
3557 // build_commitment_no_status_check may bump latest_monitor_update_id but we want them to be
3558 // strictly increasing by one, so decrement it here.
3559 self.context.latest_monitor_update_id = monitor_update.update_id;
3560 monitor_update.updates.append(&mut additional_update.updates);
3564 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
3565 &self.context.channel_id(), if need_commitment_signed { " our own commitment_signed and" } else { "" });
3566 self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
3567 return Ok(self.push_ret_blockable_mon_update(monitor_update));
3570 /// Public version of the below, checking relevant preconditions first.
3571 /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
3572 /// returns `(None, Vec::new())`.
3573 pub fn maybe_free_holding_cell_htlcs<F: Deref, L: Deref>(
3574 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3575 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3576 where F::Target: FeeEstimator, L::Target: Logger
3578 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && !self.context.channel_state.should_force_holding_cell() {
3579 self.free_holding_cell_htlcs(fee_estimator, logger)
3580 } else { (None, Vec::new()) }
3583 /// Frees any pending commitment updates in the holding cell, generating the relevant messages
3584 /// for our counterparty.
3585 fn free_holding_cell_htlcs<F: Deref, L: Deref>(
3586 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3587 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3588 where F::Target: FeeEstimator, L::Target: Logger
3590 assert!(!self.context.channel_state.is_monitor_update_in_progress());
3591 if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
3592 log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
3593 if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id());
3595 let mut monitor_update = ChannelMonitorUpdate {
3596 update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
3597 counterparty_node_id: Some(self.context.counterparty_node_id),
3598 updates: Vec::new(),
3601 let mut htlc_updates = Vec::new();
3602 mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
3603 let mut update_add_count = 0;
3604 let mut update_fulfill_count = 0;
3605 let mut update_fail_count = 0;
3606 let mut htlcs_to_fail = Vec::new();
3607 for htlc_update in htlc_updates.drain(..) {
3608 // Note that this *can* fail, though it should be due to rather-rare conditions on
3609 // fee races with adding too many outputs which push our total payments just over
3610 // the limit. In case it's less rare than I anticipate, we may want to revisit
3611 // handling this case better and maybe fulfilling some of the HTLCs while attempting
3612 // to rebalance channels.
3613 match &htlc_update {
3614 &HTLCUpdateAwaitingACK::AddHTLC {
3615 amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
3616 skimmed_fee_msat, blinding_point, ..
3618 match self.send_htlc(
3619 amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(),
3620 false, skimmed_fee_msat, blinding_point, fee_estimator, logger
3622 Ok(_) => update_add_count += 1,
3625 ChannelError::Ignore(ref msg) => {
3626 log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", &payment_hash, msg, &self.context.channel_id());
3627 // If we fail to send here, then this HTLC should
3628 // be failed backwards. Failing to send here
3629 // indicates that this HTLC may keep being put back
3630 // into the holding cell without ever being
3631 // successfully forwarded/failed/fulfilled, causing
3632 // our counterparty to eventually close on us.
3633 htlcs_to_fail.push((source.clone(), *payment_hash));
3636 panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
3642 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
3643 // If an HTLC claim was previously added to the holding cell (via
3644 // `get_update_fulfill_htlc`), then generating the claim message itself must
3645 // not fail - any in between attempts to claim the HTLC will have resulted
3646 // in it hitting the holding cell again and we cannot change the state of a
3647 // holding cell HTLC from fulfill to anything else.
3648 let mut additional_monitor_update =
3649 if let UpdateFulfillFetch::NewClaim { monitor_update, .. } =
3650 self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger)
3651 { monitor_update } else { unreachable!() };
3652 update_fulfill_count += 1;
3653 monitor_update.updates.append(&mut additional_monitor_update.updates);
3655 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
3656 match self.fail_htlc(htlc_id, err_packet.clone(), false, logger) {
3657 Ok(update_fail_msg_option) => {
3658 // If an HTLC failure was previously added to the holding cell (via
3659 // `queue_fail_htlc`) then generating the fail message itself must
3660 // not fail - we should never end up in a state where we double-fail
3661 // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
3662 // for a full revocation before failing.
3663 debug_assert!(update_fail_msg_option.is_some());
3664 update_fail_count += 1;
3667 if let ChannelError::Ignore(_) = e {}
3669 panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
3674 &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
3675 match self.fail_htlc(htlc_id, (failure_code, sha256_of_onion), false, logger) {
3676 Ok(update_fail_malformed_opt) => {
3677 debug_assert!(update_fail_malformed_opt.is_some()); // See above comment
3678 update_fail_count += 1;
3681 if let ChannelError::Ignore(_) = e {}
3683 panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
3690 if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && self.context.holding_cell_update_fee.is_none() {
3691 return (None, htlcs_to_fail);
3693 let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
3694 self.send_update_fee(feerate, false, fee_estimator, logger)
3699 let mut additional_update = self.build_commitment_no_status_check(logger);
3700 // build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_update_id
3701 // but we want the update_ids to be strictly increasing by one, so reset it here.
3702 self.context.latest_monitor_update_id = monitor_update.update_id;
3703 monitor_update.updates.append(&mut additional_update.updates);
3705 log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
3706 &self.context.channel_id(), if update_fee.is_some() { "a fee update, " } else { "" },
3707 update_add_count, update_fulfill_count, update_fail_count);
3709 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
3710 (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
3716 /// Handles receiving a remote's revoke_and_ack. Note that we may return a new
3717 /// commitment_signed message here in case we had pending outbound HTLCs to add which were
3718 /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
3719 /// generating an appropriate error *after* the channel state has been updated based on the
3720 /// revoke_and_ack message.
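/// Roughly, the flow below is: verify the revealed per-commitment secret, hand it to the
/// `ChannelMonitor` via a `CommitmentSecret` update, advance the counterparty commitment
/// number, promote any HTLC and fee state that was waiting on this revocation, and then
/// either free the holding cell or, if a monitor update is already in progress, stash the
/// resulting work in the `monitor_pending_*` fields to be replayed later.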
3721 pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK,
3722 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L, hold_mon_update: bool,
3723 ) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
3724 where F::Target: FeeEstimator, L::Target: Logger,
3726 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3727 return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
3729 if self.context.channel_state.is_peer_disconnected() {
3730 return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
3732 if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
3733 return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
3736 let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());
3738 if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
3739 if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
3740 return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
3744 if !self.context.channel_state.is_awaiting_remote_revoke() {
3745 // Our counterparty seems to have burned their coins to us (by revoking a state when we
3746 // haven't given them a new commitment transaction to broadcast). We should probably
3747 // take advantage of this by updating our channel monitor, sending them an error, and
3748 // waiting for them to broadcast their latest (now-revoked claim). But, that would be a
3749 // lot of work, and there's some chance this is all a misunderstanding anyway.
3750 // We have to do *something*, though, since our signer may get mad at us for otherwise
3751 // jumping a remote commitment number, so best to just force-close and move on.
3752 return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
3755 #[cfg(any(test, fuzzing))]
3757 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
3758 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
3761 match &self.context.holder_signer {
3762 ChannelSignerType::Ecdsa(ecdsa) => {
3763 ecdsa.validate_counterparty_revocation(
3764 self.context.cur_counterparty_commitment_transaction_number + 1,
3766 ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
3768 // TODO (taproot|arik)
3773 self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
3774 .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
3775 self.context.latest_monitor_update_id += 1;
3776 let mut monitor_update = ChannelMonitorUpdate {
3777 update_id: self.context.latest_monitor_update_id,
3778 counterparty_node_id: Some(self.context.counterparty_node_id),
3779 updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
3780 idx: self.context.cur_counterparty_commitment_transaction_number + 1,
3781 secret: msg.per_commitment_secret,
3785 // Update state now that we've passed all the can-fail calls...
3786 // (note that we may still fail to generate the new commitment_signed message, but that's
3787 // OK, we step the channel here and *then* if the new generation fails we can fail the
3788 // channel based on that, but stepping stuff here should be safe either way.)
3789 self.context.channel_state.clear_awaiting_remote_revoke();
3790 self.context.sent_message_awaiting_response = None;
3791 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
3792 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
3793 self.context.cur_counterparty_commitment_transaction_number -= 1;
3795 if self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
3796 self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
3799 log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id());
3800 let mut to_forward_infos = Vec::new();
3801 let mut revoked_htlcs = Vec::new();
3802 let mut finalized_claimed_htlcs = Vec::new();
3803 let mut update_fail_htlcs = Vec::new();
3804 let mut update_fail_malformed_htlcs = Vec::new();
3805 let mut require_commitment = false;
3806 let mut value_to_self_msat_diff: i64 = 0;
3809 // Take references explicitly so that we can hold multiple references to self.context.
3810 let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
3811 let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;
3812 let expecting_peer_commitment_signed = &mut self.context.expecting_peer_commitment_signed;
3814 // We really shouldn't have two passes here, but retain gives a non-mutable ref (Rust bug)
3815 pending_inbound_htlcs.retain(|htlc| {
3816 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
3817 log_trace!(logger, " ...removing inbound LocalRemoved {}", &htlc.payment_hash);
3818 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
3819 value_to_self_msat_diff += htlc.amount_msat as i64;
3821 *expecting_peer_commitment_signed = true;
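// Outbound HTLCs that were only waiting on this revocation are now fully resolved: failed
// ones are handed back to be failed upstream, fulfilled ones are finalized and their value
// has moved to the counterparty.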
3825 pending_outbound_htlcs.retain(|htlc| {
3826 if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
3827 log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", &htlc.payment_hash);
3828 if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
3829 revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
3831 finalized_claimed_htlcs.push(htlc.source.clone());
3832 // They fulfilled, so we sent them money
3833 value_to_self_msat_diff -= htlc.amount_msat as i64;
3838 for htlc in pending_inbound_htlcs.iter_mut() {
3839 let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
3841 } else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
3845 let mut state = InboundHTLCState::Committed;
3846 mem::swap(&mut state, &mut htlc.state);
3848 if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
3849 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
3850 htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
3851 require_commitment = true;
3852 } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
3853 match forward_info {
3854 PendingHTLCStatus::Fail(fail_msg) => {
3855 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash);
3856 require_commitment = true;
3858 HTLCFailureMsg::Relay(msg) => {
3859 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
3860 update_fail_htlcs.push(msg)
3862 HTLCFailureMsg::Malformed(msg) => {
3863 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
3864 update_fail_malformed_htlcs.push(msg)
3868 PendingHTLCStatus::Forward(forward_info) => {
3869 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash);
3870 to_forward_infos.push((forward_info, htlc.htlc_id));
3871 htlc.state = InboundHTLCState::Committed;
3877 for htlc in pending_outbound_htlcs.iter_mut() {
3878 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
3879 log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", &htlc.payment_hash);
3880 htlc.state = OutboundHTLCState::Committed;
3881 *expecting_peer_commitment_signed = true;
3883 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
3884 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
3885 // Grab the preimage, if it exists, instead of cloning
3886 let mut reason = OutboundHTLCOutcome::Success(None);
3887 mem::swap(outcome, &mut reason);
3888 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
3889 require_commitment = true;
3893 self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;
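// A fee update that was blocked on this revocation can now take effect: our own (outbound)
// update becomes committed, while an inbound one is promoted and answered with a fresh
// commitment below via `require_commitment`.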
3895 if let Some((feerate, update_state)) = self.context.pending_update_fee {
3896 match update_state {
3897 FeeUpdateState::Outbound => {
3898 debug_assert!(self.context.is_outbound());
3899 log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
3900 self.context.feerate_per_kw = feerate;
3901 self.context.pending_update_fee = None;
3902 self.context.expecting_peer_commitment_signed = true;
3904 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); },
3905 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
3906 debug_assert!(!self.context.is_outbound());
3907 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
3908 require_commitment = true;
3909 self.context.feerate_per_kw = feerate;
3910 self.context.pending_update_fee = None;
3915 let release_monitor = self.context.blocked_monitor_updates.is_empty() && !hold_mon_update;
3916 let release_state_str =
3917 if hold_mon_update { "Holding" } else if release_monitor { "Releasing" } else { "Blocked" };
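// If earlier monitor updates are still blocked (or the caller asked us to hold this one),
// queue the update for later release rather than returning it for immediate persistence.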
3918 macro_rules! return_with_htlcs_to_fail {
3919 ($htlcs_to_fail: expr) => {
3920 if !release_monitor {
3921 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
3922 update: monitor_update,
3924 return Ok(($htlcs_to_fail, None));
3926 return Ok(($htlcs_to_fail, Some(monitor_update)));
3931 if self.context.channel_state.is_monitor_update_in_progress() {
3932 // We can't actually generate a new commitment transaction (incl by freeing holding
3933 // cells) while we can't update the monitor, so we just return what we have.
3934 if require_commitment {
3935 self.context.monitor_pending_commitment_signed = true;
3936 // When the monitor updating is restored we'll call
3937 // get_last_commitment_update_for_send(), which does not update state, but we're
3938 // definitely now awaiting a remote revoke before we can step forward any more, so set it here.
3940 let mut additional_update = self.build_commitment_no_status_check(logger);
3941 // build_commitment_no_status_check may bump latest_monitor_update_id but we want the
3942 // update_ids to be strictly increasing by one, so reset it back here.
3943 self.context.latest_monitor_update_id = monitor_update.update_id;
3944 monitor_update.updates.append(&mut additional_update.updates);
3946 self.context.monitor_pending_forwards.append(&mut to_forward_infos);
3947 self.context.monitor_pending_failures.append(&mut revoked_htlcs);
3948 self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
3949 log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", &self.context.channel_id());
3950 return_with_htlcs_to_fail!(Vec::new());
3953 match self.free_holding_cell_htlcs(fee_estimator, logger) {
3954 (Some(mut additional_update), htlcs_to_fail) => {
3955 // free_holding_cell_htlcs may bump latest_monitor_update_id multiple times but we want the
3956 // update_ids to be strictly increasing by one, so reset it back here.
3957 self.context.latest_monitor_update_id = monitor_update.update_id;
3958 monitor_update.updates.append(&mut additional_update.updates);
3960 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with holding cell HTLCs freed. {} monitor update.",
3961 &self.context.channel_id(), release_state_str);
3963 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3964 return_with_htlcs_to_fail!(htlcs_to_fail);
3966 (None, htlcs_to_fail) => {
3967 if require_commitment {
3968 let mut additional_update = self.build_commitment_no_status_check(logger);
3970 // build_commitment_no_status_check may bump latest_monitor_update_id but we want the
3971 // update_ids to be strictly increasing by one, so reset it back here.
3972 self.context.latest_monitor_update_id = monitor_update.update_id;
3973 monitor_update.updates.append(&mut additional_update.updates);
3975 log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed. {} monitor update.",
3976 &self.context.channel_id(),
3977 update_fail_htlcs.len() + update_fail_malformed_htlcs.len(),
3980 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3981 return_with_htlcs_to_fail!(htlcs_to_fail);
3983 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary. {} monitor update.",
3984 &self.context.channel_id(), release_state_str);
3986 self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3987 return_with_htlcs_to_fail!(htlcs_to_fail);
3993 /// Queues up an outbound update fee by placing it in the holding cell. You should call
3994 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
3995 /// commitment update.
3996 pub fn queue_update_fee<F: Deref, L: Deref>(&mut self, feerate_per_kw: u32,
3997 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
3998 where F::Target: FeeEstimator, L::Target: Logger
4000 let msg_opt = self.send_update_fee(feerate_per_kw, true, fee_estimator, logger);
4001 assert!(msg_opt.is_none(), "We forced holding cell?");
4004 /// Adds a pending fee update to this channel. See the doc for send_htlc for
4005 /// further details on when the returned message is `None`.
4006 /// If our balance is too low to cover the cost of the next commitment transaction at the
4007 /// new feerate, the update is cancelled.
4009 /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
4010 /// [`Channel`] if `force_holding_cell` is false.
4011 fn send_update_fee<F: Deref, L: Deref>(
4012 &mut self, feerate_per_kw: u32, mut force_holding_cell: bool,
4013 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
4014 ) -> Option<msgs::UpdateFee>
4015 where F::Target: FeeEstimator, L::Target: Logger
4017 if !self.context.is_outbound() {
4018 panic!("Cannot send fee from inbound channel");
4020 if !self.context.is_usable() {
4021 panic!("Cannot update fee until channel is fully established and we haven't started shutting down");
4023 if !self.context.is_live() {
4024 panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
4027 // Before proposing a feerate update, check that we can actually afford the new fee.
4028 let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
4029 let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
4030 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
4031 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
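// Budget the commitment fee at the proposed feerate, counting the currently-committed
// non-dust HTLCs, anything sitting in our holding cell, plus a buffer of
// CONCURRENT_INBOUND_HTLC_FEE_BUFFER HTLCs to allow for adds the counterparty may send
// concurrently with this fee update.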
4032 let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000;
4033 let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
4034 if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
4035 //TODO: auto-close after a number of failures?
4036 log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
4040 // Note that we evaluate the pending HTLCs' "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
4041 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
4042 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
4043 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
4044 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
4045 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
4048 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
4049 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
4053 if self.context.channel_state.is_awaiting_remote_revoke() || self.context.channel_state.is_monitor_update_in_progress() {
4054 force_holding_cell = true;
4057 if force_holding_cell {
4058 self.context.holding_cell_update_fee = Some(feerate_per_kw);
4062 debug_assert!(self.context.pending_update_fee.is_none());
4063 self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));
4065 Some(msgs::UpdateFee {
4066 channel_id: self.context.channel_id,
4071 /// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
4072 /// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be resent.
4074 /// No further message handling calls may be made until a channel_reestablish dance has completed.
4076 /// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately.
4077 pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger {
4078 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
4079 if self.context.channel_state.is_pre_funded_state() {
4083 if self.context.channel_state.is_peer_disconnected() {
4084 // While the below code should be idempotent, it's simpler to just return early, as
4085 // redundant disconnect events can fire, though they should be rare.
4089 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
4090 self.context.announcement_sigs_state = AnnouncementSigsState::NotSent;
4093 // Upon reconnect we have to start the closing_signed dance over, but shutdown messages
4094 // will be retransmitted.
4095 self.context.last_sent_closing_fee = None;
4096 self.context.pending_counterparty_closing_signed = None;
4097 self.context.closing_fee_limits = None;
4099 let mut inbound_drop_count = 0;
4100 self.context.pending_inbound_htlcs.retain(|htlc| {
4102 InboundHTLCState::RemoteAnnounced(_) => {
4103 // They sent us an update_add_htlc but we never got the commitment_signed.
4104 // We'll tell them what commitment_signed we're expecting next and they'll drop
4105 // this HTLC accordingly
4106 inbound_drop_count += 1;
4109 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
4110 // We received a commitment_signed updating this HTLC and (at least hopefully)
4111 // sent a revoke_and_ack (which we can re-transmit) and have heard nothing
4112 // in response to it yet, so don't touch it.
4115 InboundHTLCState::Committed => true,
4116 InboundHTLCState::LocalRemoved(_) => {
4117 // We (hopefully) sent a commitment_signed updating this HTLC (which we can
4118 // re-transmit if needed) and they may have even sent a revoke_and_ack back
4119 // (that we missed). Keep this around for now and if they tell us they missed
4120 // the commitment_signed we can re-transmit the update then.
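// Roll the counterparty HTLC id counter back by the number of RemoteAnnounced adds we just
// dropped; the peer will re-announce those HTLCs with the same ids after reconnecting.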
4125 self.context.next_counterparty_htlc_id -= inbound_drop_count;
4127 if let Some((_, update_state)) = self.context.pending_update_fee {
4128 if update_state == FeeUpdateState::RemoteAnnounced {
4129 debug_assert!(!self.context.is_outbound());
4130 self.context.pending_update_fee = None;
4134 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
4135 if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
4136 // They sent us an update to remove this but haven't yet sent the corresponding
4137 // commitment_signed, we need to move it back to Committed and they can re-send
4138 // the update upon reconnection.
4139 htlc.state = OutboundHTLCState::Committed;
4143 self.context.sent_message_awaiting_response = None;
4145 self.context.channel_state.set_peer_disconnected();
4146 log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id());
4150 /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
4151 /// This must be called before we return the [`ChannelMonitorUpdate`] back to the
4152 /// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
4153 /// update completes (potentially immediately).
4154 /// The messages which were generated with the monitor update must *not* have been sent to the
4155 /// remote end, and must instead have been dropped. They will be regenerated when
4156 /// [`Self::monitor_updating_restored`] is called.
4158 /// [`ChannelManager`]: super::channelmanager::ChannelManager
4159 /// [`chain::Watch`]: crate::chain::Watch
4160 /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
4161 fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
4162 resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
4163 mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
4164 mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
4166 self.context.monitor_pending_revoke_and_ack |= resend_raa;
4167 self.context.monitor_pending_commitment_signed |= resend_commitment;
4168 self.context.monitor_pending_channel_ready |= resend_channel_ready;
4169 self.context.monitor_pending_forwards.append(&mut pending_forwards);
4170 self.context.monitor_pending_failures.append(&mut pending_fails);
4171 self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
4172 self.context.channel_state.set_monitor_update_in_progress();
4175 /// Indicates that the latest ChannelMonitor update has been committed by the client
4176 /// successfully and we should restore normal operation. Returns messages which should be sent
4177 /// to the remote side.
4178 pub fn monitor_updating_restored<L: Deref, NS: Deref>(
4179 &mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash,
4180 user_config: &UserConfig, best_block_height: u32
4181 ) -> MonitorRestoreUpdates
4184 NS::Target: NodeSigner
4186 assert!(self.context.channel_state.is_monitor_update_in_progress());
4187 self.context.channel_state.clear_monitor_update_in_progress();
4189 // If we're past (or at) the AwaitingChannelReady stage on an outbound channel, try to
4190 // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
4191 // first received the funding_signed.
4192 let mut funding_broadcastable =
4193 if self.context.is_outbound() &&
4194 matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH)) ||
4195 matches!(self.context.channel_state, ChannelState::ChannelReady(_))
4197 self.context.funding_transaction.take()
4199 // That said, if the funding transaction is already confirmed (ie we're active with a
4200 // minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
4201 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && self.context.minimum_depth != Some(0) {
4202 funding_broadcastable = None;
4205 // We will never broadcast the funding transaction when we're in MonitorUpdateInProgress
4206 // (and we assume the user never directly broadcasts the funding transaction and waits for
4207 // us to do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
4208 // * an inbound channel that failed to persist the monitor on funding_created and we got
4209 // the funding transaction confirmed before the monitor was persisted, or
4210 // * a 0-conf channel and intended to send the channel_ready before any broadcast at all.
4211 let channel_ready = if self.context.monitor_pending_channel_ready {
4212 assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
4213 "Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
4214 self.context.monitor_pending_channel_ready = false;
4215 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4216 Some(msgs::ChannelReady {
4217 channel_id: self.context.channel_id(),
4218 next_per_commitment_point,
4219 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4223 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger);
4225 let mut accepted_htlcs = Vec::new();
4226 mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards);
4227 let mut failed_htlcs = Vec::new();
4228 mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
4229 let mut finalized_claimed_htlcs = Vec::new();
4230 mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);
4232 if self.context.channel_state.is_peer_disconnected() {
4233 self.context.monitor_pending_revoke_and_ack = false;
4234 self.context.monitor_pending_commitment_signed = false;
4235 return MonitorRestoreUpdates {
4236 raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
4237 accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
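// Otherwise, resend whichever of our last revoke_and_ack / commitment_signed the in-progress
// monitor update had swallowed, in the order recorded in `resend_order`.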
4241 let raa = if self.context.monitor_pending_revoke_and_ack {
4242 Some(self.get_last_revoke_and_ack())
4244 let commitment_update = if self.context.monitor_pending_commitment_signed {
4245 self.get_last_commitment_update_for_send(logger).ok()
4247 if commitment_update.is_some() {
4248 self.mark_awaiting_response();
4251 self.context.monitor_pending_revoke_and_ack = false;
4252 self.context.monitor_pending_commitment_signed = false;
4253 let order = self.context.resend_order.clone();
4254 log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
4255 &self.context.channel_id(), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
4256 if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
4257 match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
4258 MonitorRestoreUpdates {
4259 raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
4263 pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
4264 where F::Target: FeeEstimator, L::Target: Logger
4266 if self.context.is_outbound() {
4267 return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
4269 if self.context.channel_state.is_peer_disconnected() {
4270 return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
4272 Channel::<SP>::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
4274 self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
4275 self.context.update_time_counter += 1;
4276 // Check that we won't be pushed over our dust exposure limit by the feerate increase.
4277 if !self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
4278 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
4279 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
4280 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
4281 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
4282 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
4283 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
4284 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
4285 msg.feerate_per_kw, holder_tx_dust_exposure)));
4287 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
4288 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
4289 msg.feerate_per_kw, counterparty_tx_dust_exposure)));
4295 /// Indicates that the signer may have some signatures for us, so we should retry if we're blocked.
4297 #[cfg(async_signing)]
4298 pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger {
4299 let commitment_update = if self.context.signer_pending_commitment_update {
4300 self.get_last_commitment_update_for_send(logger).ok()
4302 let funding_signed = if self.context.signer_pending_funding && !self.context.is_outbound() {
4303 self.context.get_funding_signed_msg(logger).1
4305 let channel_ready = if funding_signed.is_some() {
4306 self.check_get_channel_ready(0)
4309 log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed and {} channel_ready",
4310 if commitment_update.is_some() { "a" } else { "no" },
4311 if funding_signed.is_some() { "a" } else { "no" },
4312 if channel_ready.is_some() { "a" } else { "no" });
4314 SignerResumeUpdates {
4321 fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
4322 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4323 let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
4324 msgs::RevokeAndACK {
4325 channel_id: self.context.channel_id,
4326 per_commitment_secret,
4327 next_per_commitment_point,
4329 next_local_nonce: None,
4333 /// Gets the last commitment update for immediate sending to our peer.
4334 fn get_last_commitment_update_for_send<L: Deref>(&mut self, logger: &L) -> Result<msgs::CommitmentUpdate, ()> where L::Target: Logger {
4335 let mut update_add_htlcs = Vec::new();
4336 let mut update_fulfill_htlcs = Vec::new();
4337 let mut update_fail_htlcs = Vec::new();
4338 let mut update_fail_malformed_htlcs = Vec::new();
4340 for htlc in self.context.pending_outbound_htlcs.iter() {
4341 if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
4342 update_add_htlcs.push(msgs::UpdateAddHTLC {
4343 channel_id: self.context.channel_id(),
4344 htlc_id: htlc.htlc_id,
4345 amount_msat: htlc.amount_msat,
4346 payment_hash: htlc.payment_hash,
4347 cltv_expiry: htlc.cltv_expiry,
4348 onion_routing_packet: (**onion_packet).clone(),
4349 skimmed_fee_msat: htlc.skimmed_fee_msat,
4350 blinding_point: htlc.blinding_point,
4355 for htlc in self.context.pending_inbound_htlcs.iter() {
4356 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
4358 &InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
4359 update_fail_htlcs.push(msgs::UpdateFailHTLC {
4360 channel_id: self.context.channel_id(),
4361 htlc_id: htlc.htlc_id,
4362 reason: err_packet.clone()
4365 &InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
4366 update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
4367 channel_id: self.context.channel_id(),
4368 htlc_id: htlc.htlc_id,
4369 sha256_of_onion: sha256_of_onion.clone(),
4370 failure_code: failure_code.clone(),
4373 &InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
4374 update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
4375 channel_id: self.context.channel_id(),
4376 htlc_id: htlc.htlc_id,
4377 payment_preimage: payment_preimage.clone(),
4384 let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() {
4385 Some(msgs::UpdateFee {
4386 channel_id: self.context.channel_id(),
4387 feerate_per_kw: self.context.pending_update_fee.unwrap().0,
4391 log_trace!(logger, "Regenerating latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
4392 &self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" },
4393 update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
4394 let commitment_signed = if let Ok(update) = self.send_commitment_no_state_update(logger).map(|(cu, _)| cu) {
4395 if self.context.signer_pending_commitment_update {
4396 log_trace!(logger, "Commitment update generated: clearing signer_pending_commitment_update");
4397 self.context.signer_pending_commitment_update = false;
4401 #[cfg(not(async_signing))] {
4402 panic!("Failed to get signature for new commitment state");
4404 #[cfg(async_signing)] {
4405 if !self.context.signer_pending_commitment_update {
4406 log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
4407 self.context.signer_pending_commitment_update = true;
4412 Ok(msgs::CommitmentUpdate {
4413 update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
4418 /// Gets the `Shutdown` message we should send our peer on reconnect, if any.
4419 pub fn get_outbound_shutdown(&self) -> Option<msgs::Shutdown> {
4420 if self.context.channel_state.is_local_shutdown_sent() {
4421 assert!(self.context.shutdown_scriptpubkey.is_some());
4422 Some(msgs::Shutdown {
4423 channel_id: self.context.channel_id,
4424 scriptpubkey: self.get_closing_scriptpubkey(),
4429 /// May panic if some calls other than message-handling calls (which will all Err immediately)
4430 /// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
4432 /// Some links printed in log lines are included here to check them during build (when run with
4433 /// `cargo doc --document-private-items`):
4434 /// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
4435 /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
4436 pub fn channel_reestablish<L: Deref, NS: Deref>(
4437 &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
4438 chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock
4439 ) -> Result<ReestablishResponses, ChannelError>
4442 NS::Target: NodeSigner
4444 if !self.context.channel_state.is_peer_disconnected() {
4445 // While BOLT 2 doesn't indicate explicitly we should error this channel here, it
4446 // almost certainly indicates we are going to end up out-of-sync in some way, so we
4447 // just close here instead of trying to recover.
4448 return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
4451 if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
4452 msg.next_local_commitment_number == 0 {
4453 return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned()));
4456 let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
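// If the peer claims to have received one of our commitment_signeds, make them prove it:
// the last per-commitment secret they echo back must match the point we actually gave them
// for that state.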
4457 if msg.next_remote_commitment_number > 0 {
4458 let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
4459 let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
4460 .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
4461 if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
4462 return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
4464 if msg.next_remote_commitment_number > our_commitment_transaction {
4465 macro_rules! log_and_panic {
4466 ($err_msg: expr) => {
4467 log_error!(logger, $err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4468 panic!($err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4471 log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
4472 This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
4473 More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
4474 If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
4475 ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
4476 ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
4477 Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
4478 See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
4482 // Before we change the state of the channel, we check if the peer is sending a very old
4483 // commitment transaction number; if so, we send a warning message.
4484 if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
4485 return Err(ChannelError::Warn(format!(
4486 "Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)",
4487 msg.next_remote_commitment_number,
4488 our_commitment_transaction
4492 // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
4493 // remaining cases either succeed or ErrorMessage-fail).
4494 self.context.channel_state.clear_peer_disconnected();
4495 self.context.sent_message_awaiting_response = None;
4497 let shutdown_msg = self.get_outbound_shutdown();
4499 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger);
4501 if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(_)) {
4502 // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
4503 if !self.context.channel_state.is_our_channel_ready() ||
4504 self.context.channel_state.is_monitor_update_in_progress() {
4505 if msg.next_remote_commitment_number != 0 {
4506 return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
4508 // Short circuit the whole handler as there is nothing we can resend them
4509 return Ok(ReestablishResponses {
4510 channel_ready: None,
4511 raa: None, commitment_update: None,
4512 order: RAACommitmentOrder::CommitmentFirst,
4513 shutdown_msg, announcement_sigs,
4517 // We have OurChannelReady set!
4518 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4519 return Ok(ReestablishResponses {
4520 channel_ready: Some(msgs::ChannelReady {
4521 channel_id: self.context.channel_id(),
4522 next_per_commitment_point,
4523 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4525 raa: None, commitment_update: None,
4526 order: RAACommitmentOrder::CommitmentFirst,
4527 shutdown_msg, announcement_sigs,
4531 let required_revoke = if msg.next_remote_commitment_number == our_commitment_transaction {
4532 // Remote isn't waiting on any RevokeAndACK from us!
4533 // Note that if we need to repeat our ChannelReady we'll do that in the next if block.
4535 } else if msg.next_remote_commitment_number + 1 == our_commitment_transaction {
4536 if self.context.channel_state.is_monitor_update_in_progress() {
4537 self.context.monitor_pending_revoke_and_ack = true;
4540 Some(self.get_last_revoke_and_ack())
4543 debug_assert!(false, "All values should have been handled in the four cases above");
4544 return Err(ChannelError::Close(format!(
4545 "Peer attempted to reestablish channel expecting a future local commitment transaction: {} (received) vs {} (expected)",
4546 msg.next_remote_commitment_number,
4547 our_commitment_transaction
4551 // We only advance cur_counterparty_commitment_transaction_number upon receipt of
4552 // revoke_and_ack, not when sending commitment_signed, so we add one here if we have
4553 // AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
4554 // the corresponding revoke_and_ack back yet.
4555 let is_awaiting_remote_revoke = self.context.channel_state.is_awaiting_remote_revoke();
4556 if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
4557 self.mark_awaiting_response();
4559 let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };
4561 let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 {
4562 // We should never have to worry about MonitorUpdateInProgress resending ChannelReady
4563 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4564 Some(msgs::ChannelReady {
4565 channel_id: self.context.channel_id(),
4566 next_per_commitment_point,
4567 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4571 if msg.next_local_commitment_number == next_counterparty_commitment_number {
4572 if required_revoke.is_some() {
4573 log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id());
4575 log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id());
4578 Ok(ReestablishResponses {
4579 channel_ready, shutdown_msg, announcement_sigs,
4580 raa: required_revoke,
4581 commitment_update: None,
4582 order: self.context.resend_order.clone(),
4584 } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
4585 if required_revoke.is_some() {
4586 log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id());
4588 log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id());
4591 if self.context.channel_state.is_monitor_update_in_progress() {
4592 self.context.monitor_pending_commitment_signed = true;
4593 Ok(ReestablishResponses {
4594 channel_ready, shutdown_msg, announcement_sigs,
4595 commitment_update: None, raa: None,
4596 order: self.context.resend_order.clone(),
4599 Ok(ReestablishResponses {
4600 channel_ready, shutdown_msg, announcement_sigs,
4601 raa: required_revoke,
4602 commitment_update: self.get_last_commitment_update_for_send(logger).ok(),
4603 order: self.context.resend_order.clone(),
4606 } else if msg.next_local_commitment_number < next_counterparty_commitment_number {
4607 Err(ChannelError::Close(format!(
4608 "Peer attempted to reestablish channel with a very old remote commitment transaction: {} (received) vs {} (expected)",
4609 msg.next_local_commitment_number,
4610 next_counterparty_commitment_number,
4613 Err(ChannelError::Close(format!(
4614 "Peer attempted to reestablish channel with a future remote commitment transaction: {} (received) vs {} (expected)",
4615 msg.next_local_commitment_number,
4616 next_counterparty_commitment_number,
4621 /// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole
4622 /// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart,
4623 /// at which point they will be recalculated.
4624 fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>)
4626 where F::Target: FeeEstimator
4628 if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); }
4630 // Propose a range from our ChannelCloseMinimum feerate to our NonAnchorChannelFee feerate
4631 // plus our force_close_avoidance_max_fee_satoshis.
4632 // If we fail to come to consensus, we'll have to force-close.
4633 let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::ChannelCloseMinimum);
4634 // Use NonAnchorChannelFee because this should be an estimate for a channel close
4635 // that we don't expect to need fee bumping
4636 let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
4637 let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() };
4639 // The spec requires that (when the channel does not have anchors) we only send absolute
4640 // channel fees no greater than the absolute channel fee on the current commitment
4641 // transaction. It's unclear *which* commitment transaction this refers to, and there isn't a
4642 // very good reason to apply such a limit in any case. We don't bother doing so, risking
4643 // some force-closure by old nodes, but we wanted to close the channel anyway.
4645 if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw {
4646 let min_feerate = if self.context.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) };
4647 proposed_feerate = cmp::max(proposed_feerate, min_feerate);
4648 proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate);
4651 // Note that technically we could end up with a lower minimum fee if one side's balance is
4652 // below our dust limit, causing the output to disappear. We don't bother handling this
4653 // case, however, as this should only happen if a channel is closed before any (material)
4654 // payments have been made on it. This may cause slight fee overpayment and/or failure to
4655 // come to consensus with our counterparty on appropriate fees, however it should be a
4656 // relatively rare case. We can revisit this later, though note that in order to determine
4657 // if the funder's output is dust we have to know the absolute fee we're going to use.
4658 let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
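// Purely illustrative arithmetic (numbers not taken from the code or spec): at a proposed
// feerate of 2_500 sat/kW and a closing tx weight of 700 WU, the initial proposal would be
// 2_500 * 700 / 1_000 = 1_750 sats.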
4659 let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
4660 let proposed_max_total_fee_satoshis = if self.context.is_outbound() {
4661 // We always add force_close_avoidance_max_fee_satoshis to our normal
4662 // feerate-calculated fee, but allow the max to be overridden if we're using a
4663 // target feerate-calculated fee.
4664 cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.context.config.options.force_close_avoidance_max_fee_satoshis,
4665 proposed_max_feerate as u64 * tx_weight / 1000)
4667 self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000
4670 self.context.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis));
4671 self.context.closing_fee_limits.clone().unwrap()
4674 /// Returns true if we're ready to commence the closing_signed negotiation phase. This is true
4675 /// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At
4676 /// this point if we're the funder we should send the initial closing_signed, and in any case
4677 /// shutdown should complete within a reasonable timeframe.
4678 fn closing_negotiation_ready(&self) -> bool {
4679 self.context.closing_negotiation_ready()
4682 /// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
4683 /// an Err if no progress is being made and the channel should be force-closed instead.
4684 /// Should be called on a one-minute timer.
4685 pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
4686 if self.closing_negotiation_ready() {
4687 if self.context.closing_signed_in_flight {
4688 return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
4690 self.context.closing_signed_in_flight = true;
4696 pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
4697 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
4698 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
4699 where F::Target: FeeEstimator, L::Target: Logger
4701 // If we're waiting on a monitor persistence, that implies we're also waiting to send some
4702 // message to our counterparty (probably a `revoke_and_ack`). In such a case, we shouldn't
4703 // initiate `closing_signed` negotiation until we're clear of all pending messages. Note
4704 // that closing_negotiation_ready checks this case (as well as a few others).
4705 if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
4706 return Ok((None, None, None));
4709 if !self.context.is_outbound() {
4710 if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() {
4711 return self.closing_signed(fee_estimator, &msg);
4713 return Ok((None, None, None));
4716 // If we're waiting on a counterparty `commitment_signed` to clear some updates from our
4717 // local commitment transaction, we can't yet initiate `closing_signed` negotiation.
4718 if self.context.expecting_peer_commitment_signed {
4719 return Ok((None, None, None));
4722 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
4724 assert!(self.context.shutdown_scriptpubkey.is_some());
4725 let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false);
4726 log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)",
4727 our_min_fee, our_max_fee, total_fee_satoshis);
4729 match &self.context.holder_signer {
4730 ChannelSignerType::Ecdsa(ecdsa) => {
4732 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
4733 .map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;
4735 self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
4736 Ok((Some(msgs::ClosingSigned {
4737 channel_id: self.context.channel_id,
4738 fee_satoshis: total_fee_satoshis,
4740 fee_range: Some(msgs::ClosingSignedFeeRange {
4741 min_fee_satoshis: our_min_fee,
4742 max_fee_satoshis: our_max_fee,
4746 // TODO (taproot|arik)
4752 // Marks a channel as waiting for a response from the counterparty. If one is not received within
4753 // [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] of sending our own message to them, we'll attempt a reconnection.
4755 fn mark_awaiting_response(&mut self) {
4756 self.context.sent_message_awaiting_response = Some(0);
4759 /// Determines whether we should disconnect the counterparty due to not receiving a response
4760 /// within our expected timeframe.
4762 /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
4763 pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool {
4764 let ticks_elapsed = if let Some(ticks_elapsed) = self.context.sent_message_awaiting_response.as_mut() {
4767 // Don't disconnect when we're not waiting on a response.
4770 *ticks_elapsed += 1;
4771 *ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
4775 &mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
4776 ) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
4778 if self.context.channel_state.is_peer_disconnected() {
4779 return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
4781 if self.context.channel_state.is_pre_funded_state() {
4782 // Spec says we should fail the connection, not the channel, but that's nonsense, there
4783 // are plenty of reasons you may want to fail a channel pre-funding, and spec says you
4784 // can do that via error message without getting a connection fail anyway...
4785 return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
4787 for htlc in self.context.pending_inbound_htlcs.iter() {
4788 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
4789 return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
4792 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
4794 if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
4795 return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_hex_string())));
4798 if self.context.counterparty_shutdown_scriptpubkey.is_some() {
4799 if Some(&msg.scriptpubkey) != self.context.counterparty_shutdown_scriptpubkey.as_ref() {
4800 return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_hex_string())));
4803 self.context.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
4806 // If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
4807 // immediately after the commitment dance, but we can send a Shutdown because we won't send
4808 // any further commitment updates after we set LocalShutdownSent.
4809 let send_shutdown = !self.context.channel_state.is_local_shutdown_sent();
4811 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
4814 assert!(send_shutdown);
4815 let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
4816 Ok(scriptpubkey) => scriptpubkey,
4817 Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
4819 if !shutdown_scriptpubkey.is_compatible(their_features) {
4820 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
4822 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
4827 // From here on out, we may not fail!
4829 self.context.channel_state.set_remote_shutdown_sent();
4830 self.context.update_time_counter += 1;
4832 let monitor_update = if update_shutdown_script {
4833 self.context.latest_monitor_update_id += 1;
4834 let monitor_update = ChannelMonitorUpdate {
4835 update_id: self.context.latest_monitor_update_id,
4836 counterparty_node_id: Some(self.context.counterparty_node_id),
4837 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
4838 scriptpubkey: self.get_closing_scriptpubkey(),
4841 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
4842 self.push_ret_blockable_mon_update(monitor_update)
4844 let shutdown = if send_shutdown {
4845 Some(msgs::Shutdown {
4846 channel_id: self.context.channel_id,
4847 scriptpubkey: self.get_closing_scriptpubkey(),
4851 // We can't send our shutdown until we've committed all of our pending HTLCs, but the
4852 // remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
4853 // cell HTLCs and return them to fail the payment.
4854 self.context.holding_cell_update_fee = None;
4855 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
4856 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
4858 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
4859 dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
4866 self.context.channel_state.set_local_shutdown_sent();
4867 self.context.update_time_counter += 1;
4869 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
4872 fn build_signed_closing_transaction(&self, closing_tx: &ClosingTransaction, counterparty_sig: &Signature, sig: &Signature) -> Transaction {
4873 let mut tx = closing_tx.trust().built_transaction().clone();
4875 tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
4877 let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize();
4878 let counterparty_funding_key = self.context.counterparty_funding_pubkey().serialize();
4879 let mut holder_sig = sig.serialize_der().to_vec();
4880 holder_sig.push(EcdsaSighashType::All as u8);
4881 let mut cp_sig = counterparty_sig.serialize_der().to_vec();
4882 cp_sig.push(EcdsaSighashType::All as u8);
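// Per BOLT 3 the funding redeemscript orders the two funding pubkeys lexicographically, so
// the signatures pushed onto the witness below must follow the same ordering for
// OP_CHECKMULTISIG to pair each signature with the correct key.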
4883 if funding_key[..] < counterparty_funding_key[..] {
4884 tx.input[0].witness.push(holder_sig);
4885 tx.input[0].witness.push(cp_sig);
4887 tx.input[0].witness.push(cp_sig);
4888 tx.input[0].witness.push(holder_sig);
4891 tx.input[0].witness.push(self.context.get_funding_redeemscript().into_bytes());
4895 pub fn closing_signed<F: Deref>(
4896 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned)
4897 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
4898 where F::Target: FeeEstimator
4900 if !self.context.channel_state.is_both_sides_shutdown() {
4901 return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
4903 if self.context.channel_state.is_peer_disconnected() {
4904 return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
4906 if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
4907 return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
4909 if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
4910 return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
4913 if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() {
4914 return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
4917 if self.context.channel_state.is_monitor_update_in_progress() {
4918 self.context.pending_counterparty_closing_signed = Some(msg.clone());
4919 return Ok((None, None, None));
4922 let funding_redeemscript = self.context.get_funding_redeemscript();
4923 let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
4924 if used_total_fee != msg.fee_satoshis {
4925 return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
4927 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
4929 match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
4932 // The remote end may have decided to revoke their output due to inconsistent dust
4933 // limits, so check for that case by re-checking the signature here.
4934 closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0;
4935 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
4936 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.context.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
4940 for outp in closing_tx.trust().built_transaction().output.iter() {
4941 if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
4942 return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
4946 assert!(self.context.shutdown_scriptpubkey.is_some());
4947 if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
4948 if last_fee == msg.fee_satoshis {
4949 let shutdown_result = ShutdownResult {
4950 closure_reason: ClosureReason::CooperativeClosure,
4951 monitor_update: None,
4952 dropped_outbound_htlcs: Vec::new(),
4953 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
4954 channel_id: self.context.channel_id,
4955 user_channel_id: self.context.user_id,
4956 channel_capacity_satoshis: self.context.channel_value_satoshis,
4957 counterparty_node_id: self.context.counterparty_node_id,
4958 unbroadcasted_funding_tx: self.context.unbroadcasted_funding(),
4960 let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
4961 self.context.channel_state = ChannelState::ShutdownComplete;
4962 self.context.update_time_counter += 1;
4963 return Ok((None, Some(tx), Some(shutdown_result)));
4967 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
4969 macro_rules! propose_fee {
4970 ($new_fee: expr) => {
4971 let (closing_tx, used_fee) = if $new_fee == msg.fee_satoshis {
4972 (closing_tx, $new_fee)
4974 self.build_closing_transaction($new_fee, false)
4977 return match &self.context.holder_signer {
4978 ChannelSignerType::Ecdsa(ecdsa) => {
4980 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
4981 .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
4982 let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
4983 let shutdown_result = ShutdownResult {
4984 closure_reason: ClosureReason::CooperativeClosure,
4985 monitor_update: None,
4986 dropped_outbound_htlcs: Vec::new(),
4987 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
4988 channel_id: self.context.channel_id,
4989 user_channel_id: self.context.user_id,
4990 channel_capacity_satoshis: self.context.channel_value_satoshis,
4991 counterparty_node_id: self.context.counterparty_node_id,
4992 unbroadcasted_funding_tx: self.context.unbroadcasted_funding(),
4994 self.context.channel_state = ChannelState::ShutdownComplete;
4995 self.context.update_time_counter += 1;
4996 let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
4997 (Some(tx), Some(shutdown_result))
5002 self.context.last_sent_closing_fee = Some((used_fee, sig.clone()));
5003 Ok((Some(msgs::ClosingSigned {
5004 channel_id: self.context.channel_id,
5005 fee_satoshis: used_fee,
5007 fee_range: Some(msgs::ClosingSignedFeeRange {
5008 min_fee_satoshis: our_min_fee,
5009 max_fee_satoshis: our_max_fee,
5011 }), signed_tx, shutdown_result))
5013 // TODO (taproot|arik)
5020 if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range {
5021 if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis {
5022 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
5024 if max_fee_satoshis < our_min_fee {
5025 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee)));
5027 if min_fee_satoshis > our_max_fee {
5028 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee)));
5031 if !self.context.is_outbound() {
5032 // They have to pay, so pick the highest fee in the overlapping range.
5033 // We should never set an upper bound aside from their full balance
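// ((value_to_self_msat + 999) / 1000 rounds our msat balance up to whole sats, so the
// debug assertion below checks that our_max_fee is everything that isn't ours, i.e.
// their full balance.)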
5034 debug_assert_eq!(our_max_fee, self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000);
5035 propose_fee!(cmp::min(max_fee_satoshis, our_max_fee));
5037 if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee {
5038 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
5039 msg.fee_satoshis, our_min_fee, our_max_fee)));
5041 // The proposed fee is in our acceptable range, accept it and broadcast!
5042 propose_fee!(msg.fee_satoshis);
5045 // Old-style fee negotiation. We don't bother to enforce whether they are complying
5046 // with the "making progress" requirements; we just comply and hope for the best.
5047 if let Some((last_fee, _)) = self.context.last_sent_closing_fee {
5048 if msg.fee_satoshis > last_fee {
5049 if msg.fee_satoshis < our_max_fee {
5050 propose_fee!(msg.fee_satoshis);
5051 } else if last_fee < our_max_fee {
5052 propose_fee!(our_max_fee);
5054 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
5057 if msg.fee_satoshis > our_min_fee {
5058 propose_fee!(msg.fee_satoshis);
5059 } else if last_fee > our_min_fee {
5060 propose_fee!(our_min_fee);
5062 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
5066 if msg.fee_satoshis < our_min_fee {
5067 propose_fee!(our_min_fee);
5068 } else if msg.fee_satoshis > our_max_fee {
5069 propose_fee!(our_max_fee);
5071 propose_fee!(msg.fee_satoshis);
5077 fn internal_htlc_satisfies_config(
5078 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig,
5079 ) -> Result<(), (&'static str, u16)> {
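// Worked example (illustrative numbers only): with forwarding_fee_base_msat = 1_000,
// forwarding_fee_proportional_millionths = 100 and amt_to_forward = 1_000_000 msat, the
// required fee is 1_000 + 1_000_000 * 100 / 1_000_000 = 1_100 msat, so the inbound
// htlc.amount_msat must be at least 1_001_100 msat to satisfy the check below.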
5080 let fee = amt_to_forward.checked_mul(config.forwarding_fee_proportional_millionths as u64)
5081 .and_then(|prop_fee| (prop_fee / 1000000).checked_add(config.forwarding_fee_base_msat as u64));
5082 if fee.is_none() || htlc.amount_msat < fee.unwrap() ||
5083 (htlc.amount_msat - fee.unwrap()) < amt_to_forward {
5085 "Prior hop has deviated from specified fees parameters or origin node has obsolete ones",
5086 0x1000 | 12, // fee_insufficient
5089 if (htlc.cltv_expiry as u64) < outgoing_cltv_value as u64 + config.cltv_expiry_delta as u64 {
5091 "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
5092 0x1000 | 13, // incorrect_cltv_expiry
5098 /// Determines whether the parameters of an incoming HTLC to be forwarded satisfy the channel's
5099 /// [`ChannelConfig`]. This first looks at the channel's current [`ChannelConfig`], and if
5100 /// unsuccessful, falls back to the previous one if one exists.
5101 pub fn htlc_satisfies_config(
5102 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32,
5103 ) -> Result<(), (&'static str, u16)> {
5104 self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.context.config())
5106 if let Some(prev_config) = self.context.prev_config() {
5107 self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &prev_config)
5114 pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
5115 self.context.cur_holder_commitment_transaction_number + 1
5118 pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
5119 self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state.is_awaiting_remote_revoke() { 1 } else { 0 }
5122 pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
5123 self.context.cur_counterparty_commitment_transaction_number + 2
5127 pub fn get_signer(&self) -> &ChannelSignerType<SP> {
5128 &self.context.holder_signer
5132 pub fn get_value_stat(&self) -> ChannelValueStat {
5134 value_to_self_msat: self.context.value_to_self_msat,
5135 channel_value_msat: self.context.channel_value_satoshis * 1000,
5136 channel_reserve_msat: self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000,
5137 pending_outbound_htlcs_amount_msat: self.context.pending_outbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
5138 pending_inbound_htlcs_amount_msat: self.context.pending_inbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
5139 holding_cell_outbound_amount_msat: {
5141 for h in self.context.holding_cell_htlc_updates.iter() {
5143 &HTLCUpdateAwaitingACK::AddHTLC{amount_msat, .. } => {
5151 counterparty_max_htlc_value_in_flight_msat: self.context.counterparty_max_htlc_value_in_flight_msat,
5152 counterparty_dust_limit_msat: self.context.counterparty_dust_limit_satoshis * 1000,
5156 /// Returns true if this channel has been marked as awaiting a monitor update to move forward.
5157 /// Allowed in any state (including after shutdown)
5158 pub fn is_awaiting_monitor_update(&self) -> bool {
5159 self.context.channel_state.is_monitor_update_in_progress()
5162 /// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
5163 pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 {
5164 if self.context.blocked_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
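// Otherwise, the first blocked update is the lowest-numbered update we have not yet
// released, so the latest released (and thus possibly in-flight) update ID is the one
// immediately before it.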
5165 self.context.blocked_monitor_updates[0].update.update_id - 1
5168 /// Returns the next blocked monitor update, if one exists, and a bool indicating whether a
5169 /// further blocked monitor update exists after the next.
5170 pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(ChannelMonitorUpdate, bool)> {
5171 if self.context.blocked_monitor_updates.is_empty() { return None; }
5172 Some((self.context.blocked_monitor_updates.remove(0).update,
5173 !self.context.blocked_monitor_updates.is_empty()))
5176 /// Pushes a new monitor update into our monitor update queue, returning it if it should be
5177 /// immediately given to the user for persisting or `None` if it should be held as blocked.
5178 fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
5179 -> Option<ChannelMonitorUpdate> {
5180 let release_monitor = self.context.blocked_monitor_updates.is_empty();
5181 if !release_monitor {
5182 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
5191 pub fn blocked_monitor_updates_pending(&self) -> usize {
5192 self.context.blocked_monitor_updates.len()
5195 /// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
5196 /// If the channel is outbound, this implies we have not yet broadcasted the funding
5197 /// transaction. If the channel is inbound, this implies simply that the channel has not
5199 pub fn is_awaiting_initial_mon_persist(&self) -> bool {
5200 if !self.is_awaiting_monitor_update() { return false; }
5202 self.context.channel_state, ChannelState::AwaitingChannelReady(flags)
5203 if (flags & !(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY | FundedStateFlags::PEER_DISCONNECTED | FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS | AwaitingChannelReadyFlags::WAITING_FOR_BATCH)).is_empty()
5205 // If we're not a 0conf channel, we'll be waiting on a monitor update with only
5206 // AwaitingChannelReady set, though our peer could have sent their channel_ready.
5207 debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0);
5210 if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
5211 self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
5212 // If we're a 0-conf channel, we'll move beyond AwaitingChannelReady immediately even while
5213 // waiting for the initial monitor persistence. Thus, we check if our commitment
5214 // transaction numbers have both been iterated only exactly once (for the
5215 // funding_signed), and we're awaiting monitor update.
5217 // If we got here, we shouldn't have yet broadcasted the funding transaction (as the
5218 // only way to get an awaiting-monitor-update state during initial funding is if the
5219 // initial monitor persistence is still pending).
5221 // Because deciding we're awaiting initial broadcast spuriously could result in
5222 // funds-loss (as we don't have a monitor, but have the funding transaction confirmed),
5223 // we hard-assert here, even in production builds.
5224 if self.context.is_outbound() { assert!(self.context.funding_transaction.is_some()); }
5225 assert!(self.context.monitor_pending_channel_ready);
5226 assert_eq!(self.context.latest_monitor_update_id, 0);
5232 /// Returns true if our channel_ready has been sent
5233 pub fn is_our_channel_ready(&self) -> bool {
5234 matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY)) ||
5235 matches!(self.context.channel_state, ChannelState::ChannelReady(_))
5238 /// Returns true if our peer has either initiated or agreed to shut down the channel.
5239 pub fn received_shutdown(&self) -> bool {
5240 self.context.channel_state.is_remote_shutdown_sent()
5243 /// Returns true if we either initiated or agreed to shut down the channel.
5244 pub fn sent_shutdown(&self) -> bool {
5245 self.context.channel_state.is_local_shutdown_sent()
5248 /// Returns true if this channel is fully shut down. True here implies that no further actions
5249 /// may/will be taken on this channel, and thus this object should be freed. Any future changes
5250 /// will be handled appropriately by the chain monitor.
5251 pub fn is_shutdown(&self) -> bool {
5252 matches!(self.context.channel_state, ChannelState::ShutdownComplete)
5255 pub fn channel_update_status(&self) -> ChannelUpdateStatus {
5256 self.context.channel_update_status
5259 pub fn set_channel_update_status(&mut self, status: ChannelUpdateStatus) {
5260 self.context.update_time_counter += 1;
5261 self.context.channel_update_status = status;
5264 fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
5266 // * always when a new block/transactions are confirmed with the new height
5267 // * when funding is signed with a height of 0
5268 if self.context.funding_tx_confirmation_height == 0 && self.context.minimum_depth != Some(0) {
5272 let funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
5273 if funding_tx_confirmations <= 0 {
5274 self.context.funding_tx_confirmation_height = 0;
5277 if funding_tx_confirmations < self.context.minimum_depth.unwrap_or(0) as i64 {
5281 // If we're still pending the signature on a funding transaction, then we're not ready to send a
5282 // channel_ready yet.
5283 if self.context.signer_pending_funding {
5287 // Note that we don't include ChannelState::WaitingForBatch as we don't want to send
5288 // channel_ready until the entire batch is ready.
5289 let need_commitment_update = if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if (f & !FundedStateFlags::ALL).is_empty()) {
5290 self.context.channel_state.set_our_channel_ready();
5292 } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f & !FundedStateFlags::ALL == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY) {
5293 self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
5294 self.context.update_time_counter += 1;
5296 } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f & !FundedStateFlags::ALL == AwaitingChannelReadyFlags::OUR_CHANNEL_READY) {
5297 // We got a reorg but not enough to trigger a force close, just ignore.
5300 if self.context.funding_tx_confirmation_height != 0 &&
5301 self.context.channel_state < ChannelState::ChannelReady(ChannelReadyFlags::new())
5303 // We should never see a funding transaction on-chain until we've received
5304 // funding_signed (if we're an outbound channel), or seen funding_generated (if we're
5305 // an inbound channel - before that we have no known funding TXID). The fuzzer,
5306 // however, may do this and we shouldn't treat it as a bug.
5307 #[cfg(not(fuzzing))]
5308 panic!("Started confirming a channel in a state pre-AwaitingChannelReady: {}.\n\
5309 Do NOT broadcast a funding transaction manually - let LDK do it for you!",
5310 self.context.channel_state.to_u32());
5312 // We got a reorg but not enough to trigger a force close, just ignore.
5316 if need_commitment_update {
5317 if !self.context.channel_state.is_monitor_update_in_progress() {
5318 if !self.context.channel_state.is_peer_disconnected() {
5319 let next_per_commitment_point =
5320 self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
5321 return Some(msgs::ChannelReady {
5322 channel_id: self.context.channel_id,
5323 next_per_commitment_point,
5324 short_channel_id_alias: Some(self.context.outbound_scid_alias),
5328 self.context.monitor_pending_channel_ready = true;
5334 /// When a transaction is confirmed, we check whether it is or spends the funding transaction.
5335 /// In the first case, we store the confirmation height and calculate the short channel id.
5336 /// In the second, we simply return an Err indicating we need to be force-closed now.
5337 pub fn transactions_confirmed<NS: Deref, L: Deref>(
5338 &mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData,
5339 chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L
5340 ) -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5342 NS::Target: NodeSigner,
5345 let mut msgs = (None, None);
5346 if let Some(funding_txo) = self.context.get_funding_txo() {
5347 for &(index_in_block, tx) in txdata.iter() {
5348 // Check if the transaction is the expected funding transaction, and if it is,
5349 // check that it pays the right amount to the right script.
5350 if self.context.funding_tx_confirmation_height == 0 {
5351 if tx.txid() == funding_txo.txid {
5352 let txo_idx = funding_txo.index as usize;
5353 if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_v0_p2wsh() ||
5354 tx.output[txo_idx].value != self.context.channel_value_satoshis {
5355 if self.context.is_outbound() {
5356 // If we generated the funding transaction and it doesn't match what it
5357 // should, the client is really broken and we should just panic and
5358 // tell them off. That said, because hash collisions happen with high
5359 // probability in fuzzing mode, if we're fuzzing we just close the
5360 // channel and move on.
5361 #[cfg(not(fuzzing))]
5362 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
5364 self.context.update_time_counter += 1;
5365 let err_reason = "funding tx had wrong script/value or output index";
5366 return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() });
5368 if self.context.is_outbound() {
5369 if !tx.is_coin_base() {
5370 for input in tx.input.iter() {
5371 if input.witness.is_empty() {
5372 // We generated a malleable funding transaction, implying we've
5373 // just exposed ourselves to funds loss to our counterparty.
5374 #[cfg(not(fuzzing))]
5375 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
5380 self.context.funding_tx_confirmation_height = height;
5381 self.context.funding_tx_confirmed_in = Some(*block_hash);
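// Per BOLT 7, a short channel id packs the funding output's on-chain location:
// 3 bytes of block height, 3 bytes of transaction index within the block and
// 2 bytes of funding output index.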
5382 self.context.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
5383 Ok(scid) => Some(scid),
5384 Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
5387 // If this is a coinbase transaction and not a 0-conf channel
5388 // we should update our min_depth to 100 to handle coinbase maturity
5389 if tx.is_coin_base() &&
5390 self.context.minimum_depth.unwrap_or(0) > 0 &&
5391 self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
5392 self.context.minimum_depth = Some(COINBASE_MATURITY);
5395 // If we allow 1-conf funding, we may need to check for channel_ready here and
5396 // send it immediately instead of waiting for a best_block_updated call (which
5397 // may have already happened for this block).
5398 if let Some(channel_ready) = self.check_get_channel_ready(height) {
5399 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
5400 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger);
5401 msgs = (Some(channel_ready), announcement_sigs);
5404 for inp in tx.input.iter() {
5405 if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
5406 log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, &self.context.channel_id());
5407 return Err(ClosureReason::CommitmentTxConfirmed);
5415 /// When a new block is connected, we check the height of the block against outbound holding
5416 /// cell HTLCs in case we need to give up on them prematurely and time them out. Everything
5417 /// else (e.g. commitment transaction broadcasts, HTLC transaction broadcasting, etc) is
5418 /// handled by the ChannelMonitor.
5420 /// If we return Err, the channel may have been closed, at which point the standard
5421 /// requirements apply - no calls may be made except those explicitly stated to be allowed
5424 /// May return some HTLCs (and their payment_hash) which have timed out and should be failed
5426 pub fn best_block_updated<NS: Deref, L: Deref>(
5427 &mut self, height: u32, highest_header_time: u32, chain_hash: ChainHash,
5428 node_signer: &NS, user_config: &UserConfig, logger: &L
5429 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5431 NS::Target: NodeSigner,
5434 self.do_best_block_updated(height, highest_header_time, Some((chain_hash, node_signer, user_config)), logger)
5437 fn do_best_block_updated<NS: Deref, L: Deref>(
5438 &mut self, height: u32, highest_header_time: u32,
5439 chain_node_signer: Option<(ChainHash, &NS, &UserConfig)>, logger: &L
5440 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5442 NS::Target: NodeSigner,
5445 let mut timed_out_htlcs = Vec::new();
5446 // This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
5447 // forward an HTLC when our counterparty should almost certainly just fail it for expiring
5449 let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
5450 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
5452 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
5453 if *cltv_expiry <= unforwarded_htlc_cltv_limit {
5454 timed_out_htlcs.push((source.clone(), payment_hash.clone()));
5462 self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);
5464 if let Some(channel_ready) = self.check_get_channel_ready(height) {
5465 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
5466 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
5468 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
5469 return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
5472 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
5473 self.context.channel_state.is_our_channel_ready() {
5474 let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
5475 if self.context.funding_tx_confirmation_height == 0 {
5476 // Note that check_get_channel_ready may reset funding_tx_confirmation_height to
5477 // zero if it has been reorged out, however in either case, our state flags
5478 // indicate we've already sent a channel_ready
5479 funding_tx_confirmations = 0;
5482 // If we've sent channel_ready (or have both sent and received channel_ready), and
5483 // the funding transaction has become unconfirmed,
5484 // close the channel and hope we can get the latest state on chain (because presumably
5485 // the funding transaction is at least still in the mempool of most nodes).
5487 // Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
5488 // 0-conf channel, but not doing so may lead to the
5489 // `ChannelManager::short_to_chan_info` map being inconsistent, so we currently have
5491 if funding_tx_confirmations == 0 && self.context.funding_tx_confirmed_in.is_some() {
5492 let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
5493 self.context.minimum_depth.unwrap(), funding_tx_confirmations);
5494 return Err(ClosureReason::ProcessingError { err: err_reason });
5496 } else if !self.context.is_outbound() && self.context.funding_tx_confirmed_in.is_none() &&
5497 height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
5498 log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id);
5499 // If funding_tx_confirmed_in is unset, the channel must not be active
5500 assert!(self.context.channel_state <= ChannelState::ChannelReady(ChannelReadyFlags::new()));
5501 assert!(!self.context.channel_state.is_our_channel_ready());
5502 return Err(ClosureReason::FundingTimedOut);
5505 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
5506 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
5508 Ok((None, timed_out_htlcs, announcement_sigs))
5511 /// Indicates the funding transaction is no longer confirmed in the main chain. This may
5512 /// force-close the channel, but may also indicate a harmless reorganization of a block or two
5513 /// before the channel has reached channel_ready and we can just wait for more blocks.
5514 pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
5515 if self.context.funding_tx_confirmation_height != 0 {
5516 // We handle the funding disconnection by calling best_block_updated with a height one
5517 // below where our funding was connected, implying a reorg back to conf_height - 1.
5518 let reorg_height = self.context.funding_tx_confirmation_height - 1;
5519 // We use the time field to bump the current time we set on channel updates if it's
5520 // larger. If we don't know that time has moved forward, we can just set it to the last
5521 // time we saw and it will be ignored.
5522 let best_time = self.context.update_time_counter;
5523 match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&dyn NodeSigner, &UserConfig)>, logger) {
5524 Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
5525 assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?");
5526 assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
5527 assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?");
5533 // We never learned about the funding confirmation anyway, just ignore
5538 // Methods to get unprompted messages to send to the remote end (or where we already returned
5539 // something in the handler for the message that prompted this message):
5541 /// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
5542 /// announceable and available for use (have exchanged [`ChannelReady`] messages in both
5543 /// directions). Should be used for both broadcasted announcements and in response to an
5544 /// AnnouncementSignatures message from the remote peer.
5546 /// Will only fail if we're not in a state where channel_announcement may be sent (including
5549 /// This will only return ChannelError::Ignore upon failure.
5551 /// [`ChannelReady`]: crate::ln::msgs::ChannelReady
5552 fn get_channel_announcement<NS: Deref>(
5553 &self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
5554 ) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5555 if !self.context.config.announced_channel {
5556 return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
5558 if !self.context.is_usable() {
5559 return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
5562 let short_channel_id = self.context.get_short_channel_id()
5563 .ok_or(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel has not been confirmed yet".to_owned()))?;
5564 let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5565 .map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
5566 let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
5567 let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();
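// BOLT 7 requires node_id_1 (and the matching bitcoin_key_1) to be the lesser of the two
// node ids when compared as serialized bytes, so order both pairs accordingly below.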
5569 let msg = msgs::UnsignedChannelAnnouncement {
5570 features: channelmanager::provided_channel_features(&user_config),
5573 node_id_1: if were_node_one { node_id } else { counterparty_node_id },
5574 node_id_2: if were_node_one { counterparty_node_id } else { node_id },
5575 bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }),
5576 bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }),
5577 excess_data: Vec::new(),
5583 fn get_announcement_sigs<NS: Deref, L: Deref>(
5584 &mut self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
5585 best_block_height: u32, logger: &L
5586 ) -> Option<msgs::AnnouncementSignatures>
5588 NS::Target: NodeSigner,
5591 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
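// Fewer than six confirmations (confirmation_height + 5 > best_block_height means
// best_block_height - confirmation_height + 1 < 6), so it's too early to announce.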
5595 if !self.context.is_usable() {
5599 if self.context.channel_state.is_peer_disconnected() {
5600 log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
5604 if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent {
5608 log_trace!(logger, "Creating an announcement_signatures message for channel {}", &self.context.channel_id());
5609 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
5612 log_trace!(logger, "{:?}", e);
5616 let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) {
5618 log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!");
5623 match &self.context.holder_signer {
5624 ChannelSignerType::Ecdsa(ecdsa) => {
5625 let our_bitcoin_sig = match ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) {
5627 log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!");
5632 let short_channel_id = match self.context.get_short_channel_id() {
5634 None => return None,
5637 self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;
5639 Some(msgs::AnnouncementSignatures {
5640 channel_id: self.context.channel_id(),
5642 node_signature: our_node_sig,
5643 bitcoin_signature: our_bitcoin_sig,
5646 // TODO (taproot|arik)
5652 /// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are
5654 fn sign_channel_announcement<NS: Deref>(
5655 &self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement
5656 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5657 if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs {
5658 let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5659 .map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?);
5660 let were_node_one = announcement.node_id_1 == our_node_key;
5662 let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement))
5663 .map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?;
5664 match &self.context.holder_signer {
5665 ChannelSignerType::Ecdsa(ecdsa) => {
5666 let our_bitcoin_sig = ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx)
5667 .map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
5668 Ok(msgs::ChannelAnnouncement {
5669 node_signature_1: if were_node_one { our_node_sig } else { their_node_sig },
5670 node_signature_2: if were_node_one { their_node_sig } else { our_node_sig },
5671 bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig },
5672 bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
5673 contents: announcement,
5676 // TODO (taproot|arik)
5681 Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
5685 /// Processes an incoming announcement_signatures message, providing a fully-signed
5686 /// channel_announcement message which we can broadcast and storing our counterparty's
5687 /// signatures for later reconstruction/rebroadcast of the channel_announcement.
5688 pub fn announcement_signatures<NS: Deref>(
5689 &mut self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32,
5690 msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
5691 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5692 let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;
5694 let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);
5696 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
5697 return Err(ChannelError::Close(format!(
5698 "Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
5699 &announcement, self.context.get_counterparty_node_id())));
5701 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
5702 return Err(ChannelError::Close(format!(
5703 "Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
5704 &announcement, self.context.counterparty_funding_pubkey())));
5707 self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature));
5708 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5709 return Err(ChannelError::Ignore(
5710 "Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned()));
5713 self.sign_channel_announcement(node_signer, announcement)
5716 /// Gets a signed channel_announcement for this channel, if we previously received an
5717 /// announcement_signatures from our counterparty.
5718 pub fn get_signed_channel_announcement<NS: Deref>(
5719 &self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32, user_config: &UserConfig
5720 ) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
5721 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5724 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
5726 Err(_) => return None,
5728 match self.sign_channel_announcement(node_signer, announcement) {
5729 Ok(res) => Some(res),
5734 /// May panic if called on a channel that wasn't immediately-previously
5735 /// self.remove_uncommitted_htlcs_and_mark_paused()'d
5736 pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
5737 assert!(self.context.channel_state.is_peer_disconnected());
5738 assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
5739 // Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
5740 // current to_remote balances. However, it no longer has any use, and thus is now simply
5741 // set to a dummy (but valid, as required by the spec) public key.
5742 // fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey"
5743 // branches, but we unwrap it below, so we arbitrarily select a dummy pubkey which is both
5744 // valid, and valid in fuzzing mode's arbitrary validity criteria:
5745 let mut pk = [2; 33]; pk[1] = 0xff;
5746 let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
5747 let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
5748 let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
5749 log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), &self.context.channel_id());
5752 log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", &self.context.channel_id());
5755 self.mark_awaiting_response();
5756 msgs::ChannelReestablish {
5757 channel_id: self.context.channel_id(),
5758 // The protocol has two different commitment number concepts - the "commitment
5759 // transaction number", which starts from 0 and counts up, and the "revocation key
5760 // index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
5761 // commitment transaction numbers by the index which will be used to reveal the
5762 // revocation key for that commitment transaction, which means we have to convert them
5763 // to protocol-level commitment numbers here...
5765 // next_local_commitment_number is the next commitment_signed number we expect to
5766 // receive (indicating if they need to resend one that we missed).
5767 next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
5768 // We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
5769 // receive, however we track it by the next commitment number for a remote transaction
5770 // (which is one further, as they always revoke previous commitment transaction, not
5771 // the one we send) so we have to decrement by 1. Note that if
5772 // cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
5773 // dropped this channel on disconnect as it hasn't yet reached AwaitingChannelReady so we can't
5775 next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
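// E.g. if cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1
// (we've only built their initial commitment, protocol number 0), this evaluates to 0:
// the next revoke_and_ack we expect from them is for commitment number 0.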
5776 your_last_per_commitment_secret: remote_last_secret,
5777 my_current_per_commitment_point: dummy_pubkey,
5778 // TODO(dual_funding): If we've sent `commitment_signed` for an interactive transaction
5779 // construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
5780 // txid of that interactive transaction, else we MUST NOT set it.
5781 next_funding_txid: None,
5786 // Send stuff to our remote peers:
5788 /// Queues up an outbound HTLC to send by placing it in the holding cell. You should call
5789 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
5790 /// commitment update.
5792 /// `Err`s will only be [`ChannelError::Ignore`].
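///
/// # Example
///
/// A rough sketch of the intended call pattern (the `channel`, `fee_estimator` and `logger`
/// bindings, and the HTLC parameters, are assumed to exist in the caller):
///
/// ```ignore
/// channel.queue_add_htlc(amount_msat, payment_hash, cltv_expiry, source,
///     onion_routing_packet, None, None, &fee_estimator, &logger)?;
/// // The HTLC now sits in the holding cell; freeing the holding cell is what actually
/// // builds and sends the update_add_htlc + commitment_signed messages.
/// channel.maybe_free_holding_cell_htlcs(&fee_estimator, &logger);
/// ```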
5793 pub fn queue_add_htlc<F: Deref, L: Deref>(
5794 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
5795 onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
5796 blinding_point: Option<PublicKey>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5797 ) -> Result<(), ChannelError>
5798 where F::Target: FeeEstimator, L::Target: Logger
5801 .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true,
5802 skimmed_fee_msat, blinding_point, fee_estimator, logger)
5803 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
5805 if let ChannelError::Ignore(_) = err { /* fine */ }
5806 else { debug_assert!(false, "Queueing cannot trigger channel failure"); }
5811 /// Adds a pending outbound HTLC to this channel. Note that you probably want
5812 /// [`Self::send_htlc_and_commit`] instead, as you'll want both messages at once.
5814 /// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on
5816 /// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
5817 /// wouldn't be able to determine what they actually ACK'ed if we have two sets of updates
5819 /// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as
5820 /// we may not yet have sent the previous commitment update messages and will need to
5821 /// regenerate them.
5823 /// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods
5824 /// on this [`Channel`] if `force_holding_cell` is false.
5826 /// `Err`s will only be [`ChannelError::Ignore`].
5827 fn send_htlc<F: Deref, L: Deref>(
5828 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
5829 onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool,
5830 skimmed_fee_msat: Option<u64>, blinding_point: Option<PublicKey>,
5831 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5832 ) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
5833 where F::Target: FeeEstimator, L::Target: Logger
5835 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
5836 self.context.channel_state.is_local_shutdown_sent() ||
5837 self.context.channel_state.is_remote_shutdown_sent()
5839 return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
5841 let channel_total_msat = self.context.channel_value_satoshis * 1000;
5842 if amount_msat > channel_total_msat {
5843 return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat)));
5846 if amount_msat == 0 {
5847 return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
5850 let available_balances = self.context.get_available_balances(fee_estimator);
5851 if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
5852 return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
5853 available_balances.next_outbound_htlc_minimum_msat)));
5856 if amount_msat > available_balances.next_outbound_htlc_limit_msat {
5857 return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
5858 available_balances.next_outbound_htlc_limit_msat)));
5861 if self.context.channel_state.is_peer_disconnected() {
5862 // Note that this should never really happen: being !is_live() on receipt of an
5863 // incoming HTLC for relay will result in us rejecting the HTLC, and we won't allow
5864 // the user to send directly into a !is_live() channel. However, if we
5865 // disconnected during the time the previous hop was doing the commitment dance we may
5866 // end up getting here after the forwarding delay. In any case, returning an
5867 // IgnoreError will get ChannelManager to do the right thing and fail backwards now.
5868 return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
5871 let need_holding_cell = self.context.channel_state.should_force_holding_cell();
5872 log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}",
5873 payment_hash, amount_msat,
5874 if force_holding_cell { "into holding cell" }
5875 else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
5876 else { "to peer" });
5878 if need_holding_cell {
5879 force_holding_cell = true;
5882 // Now update local state:
5883 if force_holding_cell {
5884 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
5889 onion_routing_packet,
5896 self.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
5897 htlc_id: self.context.next_holder_htlc_id,
5899 payment_hash: payment_hash.clone(),
5901 state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
5907 let res = msgs::UpdateAddHTLC {
5908 channel_id: self.context.channel_id,
5909 htlc_id: self.context.next_holder_htlc_id,
5913 onion_routing_packet,
5917 self.context.next_holder_htlc_id += 1;
5922 fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
5923 log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
5924 // We can upgrade the status of some HTLCs that are waiting on a commitment; even if we
5925 // fail to generate the new commitment, we are still at least at a position where upgrading their status
5927 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
5928 let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state {
5929 Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
5931 if let Some(state) = new_state {
5932 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
5936 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
5937 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
5938 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
5939 // Grab the preimage, if it exists, instead of cloning
5940 let mut reason = OutboundHTLCOutcome::Success(None);
5941 mem::swap(outcome, &mut reason);
5942 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
5945 if let Some((feerate, update_state)) = self.context.pending_update_fee {
5946 if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
5947 debug_assert!(!self.context.is_outbound());
5948 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
5949 self.context.feerate_per_kw = feerate;
5950 self.context.pending_update_fee = None;
5953 self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst;
5955 let (mut htlcs_ref, counterparty_commitment_tx) =
5956 self.build_commitment_no_state_update(logger);
5957 let counterparty_commitment_txid = counterparty_commitment_tx.trust().txid();
5958 let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
5959 htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();
5961 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent {
5962 self.context.announcement_sigs_state = AnnouncementSigsState::Committed;
5965 self.context.latest_monitor_update_id += 1;
5966 let monitor_update = ChannelMonitorUpdate {
5967 update_id: self.context.latest_monitor_update_id,
5968 counterparty_node_id: Some(self.context.counterparty_node_id),
5969 updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
5970 commitment_txid: counterparty_commitment_txid,
5971 htlc_outputs: htlcs.clone(),
5972 commitment_number: self.context.cur_counterparty_commitment_transaction_number,
5973 their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap(),
5974 feerate_per_kw: Some(counterparty_commitment_tx.feerate_per_kw()),
5975 to_broadcaster_value_sat: Some(counterparty_commitment_tx.to_broadcaster_value_sat()),
5976 to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()),
5979 self.context.channel_state.set_awaiting_remote_revoke();
5983 fn build_commitment_no_state_update<L: Deref>(&self, logger: &L)
5984 -> (Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>, CommitmentTransaction)
5985 where L::Target: Logger
5987 let counterparty_keys = self.context.build_remote_transaction_keys();
5988 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
5989 let counterparty_commitment_tx = commitment_stats.tx;
5991 #[cfg(any(test, fuzzing))]
5993 if !self.context.is_outbound() {
5994 let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
5995 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
5996 if let Some(info) = projected_commit_tx_info {
5997 let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len();
5998 if info.total_pending_htlcs == total_pending_htlcs
5999 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
6000 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
6001 && info.feerate == self.context.feerate_per_kw {
6002 let actual_fee = commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.get_channel_type());
6003 assert_eq!(actual_fee, info.fee);
6009 (commitment_stats.htlcs_included, counterparty_commitment_tx)
6012 /// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
6013 /// generation when we shouldn't change HTLC/channel state.
6014 fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
6015 // Get the fee tests from `build_commitment_no_state_update`
6016 #[cfg(any(test, fuzzing))]
6017 self.build_commitment_no_state_update(logger);
6019 let counterparty_keys = self.context.build_remote_transaction_keys();
6020 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
6021 let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
6023 match &self.context.holder_signer {
6024 ChannelSignerType::Ecdsa(ecdsa) => {
6025 let (signature, htlc_signatures);
6028 let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
6029 for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
6033 let res = ecdsa.sign_counterparty_commitment(
6034 &commitment_stats.tx,
6035 commitment_stats.inbound_htlc_preimages,
6036 commitment_stats.outbound_htlc_preimages,
6037 &self.context.secp_ctx,
6038 ).map_err(|_| ChannelError::Ignore("Failed to get signatures for new commitment_signed".to_owned()))?;
6040 htlc_signatures = res.1;
6042 log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
6043 encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
6044 &counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
6045 log_bytes!(signature.serialize_compact()[..]), &self.context.channel_id());
6047 for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
6048 log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
6049 encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, &self.context.channel_type, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
6050 encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &counterparty_keys)),
6051 log_bytes!(counterparty_keys.broadcaster_htlc_key.to_public_key().serialize()),
6052 log_bytes!(htlc_sig.serialize_compact()[..]), &self.context.channel_id());
6056 Ok((msgs::CommitmentSigned {
6057 channel_id: self.context.channel_id,
6061 partial_signature_with_nonce: None,
6062 }, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
6064 // TODO (taproot|arik)
6070 /// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
6071 /// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
6073 /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
6074 /// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
6075 pub fn send_htlc_and_commit<F: Deref, L: Deref>(
6076 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32,
6077 source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
6078 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
6079 ) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
6080 where F::Target: FeeEstimator, L::Target: Logger
6082 let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
6083 onion_routing_packet, false, skimmed_fee_msat, None, fee_estimator, logger);
6084 if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
6087 let monitor_update = self.build_commitment_no_status_check(logger);
6088 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
6089 Ok(self.push_ret_blockable_mon_update(monitor_update))
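// Illustrative sketch only (not part of the API surface): a caller holding a funded channel,
// a `LowerBoundedFeeEstimator` and a `Logger` would typically drive this roughly as follows,
// where `source`/`onion_packet` are values the caller already constructed:
//
//     match channel.send_htlc_and_commit(amount_msat, payment_hash, cltv_expiry, source,
//             onion_packet, None, &fee_estimator, &logger) {
//         Ok(Some(monitor_update)) => { /* persist the monitor update before releasing it */ }
//         Ok(None) => { /* nothing to persist yet (e.g. holding cell or blocked update) */ }
//         Err(ChannelError::Ignore(_)) => { /* HTLC cannot be sent right now; fail it back */ }
//         Err(_) => debug_assert!(false, "send_htlc only returns Ignore errors"),
//     }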
6095 /// Applies the `ChannelUpdate` and returns a boolean indicating whether a change actually occurred.
6097 pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<bool, ChannelError> {
6098 let new_forwarding_info = Some(CounterpartyForwardingInfo {
6099 fee_base_msat: msg.contents.fee_base_msat,
6100 fee_proportional_millionths: msg.contents.fee_proportional_millionths,
6101 cltv_expiry_delta: msg.contents.cltv_expiry_delta
6103 let did_change = self.context.counterparty_forwarding_info != new_forwarding_info;
6105 self.context.counterparty_forwarding_info = new_forwarding_info;
6111 /// Begins the shutdown process, getting a message for the remote peer and returning all
6112 /// holding cell HTLCs for payment failure.
6113 pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures,
6114 target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
6115 -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
6117 for htlc in self.context.pending_outbound_htlcs.iter() {
6118 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
6119 return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
6122 if self.context.channel_state.is_local_shutdown_sent() {
6123 return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
6125 else if self.context.channel_state.is_remote_shutdown_sent() {
6126 return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
6128 if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
6129 return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
6131 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
6132 if self.context.channel_state.is_peer_disconnected() || self.context.channel_state.is_monitor_update_in_progress() {
6133 return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
6136 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
6139 // use override shutdown script if provided
6140 let shutdown_scriptpubkey = match override_shutdown_script {
6141 Some(script) => script,
6143 // otherwise, use the shutdown scriptpubkey provided by the signer
6144 match signer_provider.get_shutdown_scriptpubkey() {
6145 Ok(scriptpubkey) => scriptpubkey,
6146 Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
6150 if !shutdown_scriptpubkey.is_compatible(their_features) {
6151 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
6153 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
6158 // From here on out, we may not fail!
6159 self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
6160 self.context.channel_state.set_local_shutdown_sent();
6161 self.context.update_time_counter += 1;
6163 let monitor_update = if update_shutdown_script {
6164 self.context.latest_monitor_update_id += 1;
6165 let monitor_update = ChannelMonitorUpdate {
6166 update_id: self.context.latest_monitor_update_id,
6167 counterparty_node_id: Some(self.context.counterparty_node_id),
6168 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
6169 scriptpubkey: self.get_closing_scriptpubkey(),
6172 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
6173 self.push_ret_blockable_mon_update(monitor_update)
6175 let shutdown = msgs::Shutdown {
6176 channel_id: self.context.channel_id,
6177 scriptpubkey: self.get_closing_scriptpubkey(),
6180 // Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
6181 // our shutdown until we've committed all of the pending changes.
6182 self.context.holding_cell_update_fee = None;
6183 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
6184 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
6186 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
6187 dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
6194 debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
6195 "we can't both complete shutdown and return a monitor update");
6197 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
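// Illustrative sketch only: the caller of `get_shutdown` is expected to (1) relay the returned
// `msgs::Shutdown` to the peer, (2) apply/persist the optional `ChannelMonitorUpdate`, and
// (3) fail the returned holding-cell HTLCs back toward their sources. With placeholder
// handlers (`send_to_peer`, `apply_monitor_update` and `fail_htlc_back` are not LDK APIs):
//
//     let (shutdown_msg, monitor_update_opt, dropped_htlcs) =
//         channel.get_shutdown(&signer_provider, &their_features, None, None)?;
//     send_to_peer(shutdown_msg);
//     if let Some(update) = monitor_update_opt { apply_monitor_update(update); }
//     for (source, payment_hash) in dropped_htlcs { fail_htlc_back(source, payment_hash); }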
6200 pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
6201 self.context.holding_cell_htlc_updates.iter()
6202 .flat_map(|htlc_update| {
6204 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
6205 => Some((source, payment_hash)),
6209 .chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
6213 /// A not-yet-funded outbound (from holder) channel using V1 channel establishment.
6214 pub(super) struct OutboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
6215 pub context: ChannelContext<SP>,
6216 pub unfunded_context: UnfundedChannelContext,
6219 impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
6220 pub fn new<ES: Deref, F: Deref>(
6221 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
6222 channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
6223 outbound_scid_alias: u64, temporary_channel_id: Option<ChannelId>
6224 ) -> Result<OutboundV1Channel<SP>, APIError>
6225 where ES::Target: EntropySource,
6226 F::Target: FeeEstimator
6228 let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
6229 let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
6230 let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
6231 let pubkeys = holder_signer.pubkeys().clone();
6233 if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
6234 return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
6236 if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
6237 return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
6239 let channel_value_msat = channel_value_satoshis * 1000;
6240 if push_msat > channel_value_msat {
6241 return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
6243 if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
6244 return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk", holder_selected_contest_delay)});
6246 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
6247 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6248 // Protocol level safety check in place, although it should never happen because
6249 // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
6250 return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below implementation dust limit ({})", holder_selected_channel_reserve_satoshis) });
6253 let channel_type = Self::get_initial_channel_type(&config, their_features);
6254 debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
6256 let (commitment_conf_target, anchor_outputs_value_msat) = if channel_type.supports_anchors_zero_fee_htlc_tx() {
6257 (ConfirmationTarget::AnchorChannelFee, ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000)
6259 (ConfirmationTarget::NonAnchorChannelFee, 0)
6261 let commitment_feerate = fee_estimator.bounded_sat_per_1000_weight(commitment_conf_target);
6263 let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
6264 let commitment_tx_fee = commit_tx_fee_msat(commitment_feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
6265 if value_to_self_msat.saturating_sub(anchor_outputs_value_msat) < commitment_tx_fee {
6266 return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even cover the initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
6269 let mut secp_ctx = Secp256k1::new();
6270 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
6272 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
6273 match signer_provider.get_shutdown_scriptpubkey() {
6274 Ok(scriptpubkey) => Some(scriptpubkey),
6275 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
6279 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
6280 if !shutdown_scriptpubkey.is_compatible(&their_features) {
6281 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
6285 let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
6286 Ok(script) => script,
6287 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
6290 let temporary_channel_id = temporary_channel_id.unwrap_or_else(|| ChannelId::temporary_from_entropy_source(entropy_source));
6293 context: ChannelContext {
6296 config: LegacyChannelConfig {
6297 options: config.channel_config.clone(),
6298 announced_channel: config.channel_handshake_config.announced_channel,
6299 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
6304 inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),
6306 channel_id: temporary_channel_id,
6307 temporary_channel_id: Some(temporary_channel_id),
6308 channel_state: ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT),
6309 announcement_sigs_state: AnnouncementSigsState::NotSent,
6311 channel_value_satoshis,
6313 latest_monitor_update_id: 0,
6315 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
6316 shutdown_scriptpubkey,
6319 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6320 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6323 pending_inbound_htlcs: Vec::new(),
6324 pending_outbound_htlcs: Vec::new(),
6325 holding_cell_htlc_updates: Vec::new(),
6326 pending_update_fee: None,
6327 holding_cell_update_fee: None,
6328 next_holder_htlc_id: 0,
6329 next_counterparty_htlc_id: 0,
6330 update_time_counter: 1,
6332 resend_order: RAACommitmentOrder::CommitmentFirst,
6334 monitor_pending_channel_ready: false,
6335 monitor_pending_revoke_and_ack: false,
6336 monitor_pending_commitment_signed: false,
6337 monitor_pending_forwards: Vec::new(),
6338 monitor_pending_failures: Vec::new(),
6339 monitor_pending_finalized_fulfills: Vec::new(),
6341 signer_pending_commitment_update: false,
6342 signer_pending_funding: false,
6344 #[cfg(debug_assertions)]
6345 holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
6346 #[cfg(debug_assertions)]
6347 counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
6349 last_sent_closing_fee: None,
6350 pending_counterparty_closing_signed: None,
6351 expecting_peer_commitment_signed: false,
6352 closing_fee_limits: None,
6353 target_closing_feerate_sats_per_kw: None,
6355 funding_tx_confirmed_in: None,
6356 funding_tx_confirmation_height: 0,
6357 short_channel_id: None,
6358 channel_creation_height: current_chain_height,
6360 feerate_per_kw: commitment_feerate,
6361 counterparty_dust_limit_satoshis: 0,
6362 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
6363 counterparty_max_htlc_value_in_flight_msat: 0,
6364 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
6365 counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel
6366 holder_selected_channel_reserve_satoshis,
6367 counterparty_htlc_minimum_msat: 0,
6368 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
6369 counterparty_max_accepted_htlcs: 0,
6370 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
6371 minimum_depth: None, // Filled in in accept_channel
6373 counterparty_forwarding_info: None,
6375 channel_transaction_parameters: ChannelTransactionParameters {
6376 holder_pubkeys: pubkeys,
6377 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
6378 is_outbound_from_holder: true,
6379 counterparty_parameters: None,
6380 funding_outpoint: None,
6381 channel_type_features: channel_type.clone()
6383 funding_transaction: None,
6384 is_batch_funding: None,
6386 counterparty_cur_commitment_point: None,
6387 counterparty_prev_commitment_point: None,
6388 counterparty_node_id,
6390 counterparty_shutdown_scriptpubkey: None,
6392 commitment_secrets: CounterpartyCommitmentSecrets::new(),
6394 channel_update_status: ChannelUpdateStatus::Enabled,
6395 closing_signed_in_flight: false,
6397 announcement_sigs: None,
6399 #[cfg(any(test, fuzzing))]
6400 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
6401 #[cfg(any(test, fuzzing))]
6402 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
6404 workaround_lnd_bug_4006: None,
6405 sent_message_awaiting_response: None,
6407 latest_inbound_scid_alias: None,
6408 outbound_scid_alias,
6410 channel_pending_event_emitted: false,
6411 channel_ready_event_emitted: false,
6413 #[cfg(any(test, fuzzing))]
6414 historical_inbound_htlc_fulfills: HashSet::new(),
6419 blocked_monitor_updates: Vec::new(),
6421 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
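// For orientation (illustrative only; argument values are placeholders a caller already has):
// an `OutboundV1Channel` created here walks the V1 establishment flow implemented below:
// `get_open_channel` builds the `open_channel` message, `accept_channel` consumes the peer's
// reply, `get_funding_created` is called once the funding transaction is known, and a
// successful `funding_signed` converts this into a funded `Channel` plus its initial
// `ChannelMonitor`:
//
//     let mut chan = OutboundV1Channel::new(&fee_estimator, &entropy_source, &signer_provider,
//         counterparty_node_id, &their_features, channel_value_satoshis, push_msat, user_id,
//         &config, current_chain_height, outbound_scid_alias, None)?;
//     let open_channel_msg = chan.get_open_channel(chain_hash);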
6425 /// Only allowed after [`ChannelContext::channel_transaction_parameters`] is set.
6426 fn get_funding_created_msg<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
6427 let counterparty_keys = self.context.build_remote_transaction_keys();
6428 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
6429 let signature = match &self.context.holder_signer {
6430 // TODO (taproot|arik): move match into calling method for Taproot
6431 ChannelSignerType::Ecdsa(ecdsa) => {
6432 ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.context.secp_ctx)
6433 .map(|(sig, _)| sig).ok()?
6435 // TODO (taproot|arik)
6440 if self.context.signer_pending_funding {
6441 log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding");
6442 self.context.signer_pending_funding = false;
6445 Some(msgs::FundingCreated {
6446 temporary_channel_id: self.context.temporary_channel_id.unwrap(),
6447 funding_txid: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().txid,
6448 funding_output_index: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().index,
6451 partial_signature_with_nonce: None,
6453 next_local_nonce: None,
6457 /// Updates channel state with knowledge of the funding transaction's txid/index, and generates
6458 /// a funding_created message for the remote peer.
6459 /// Panics if called at some time other than immediately after initial handshake, if called twice,
6460 /// or if called on an inbound channel.
6461 /// Note that channel_id changes during this call!
6462 /// Do NOT broadcast the funding transaction until after a successful funding_signed call!
6463 /// If an Err is returned, it is a ChannelError::Close.
6464 pub fn get_funding_created<L: Deref>(&mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
6465 -> Result<Option<msgs::FundingCreated>, (Self, ChannelError)> where L::Target: Logger {
6466 if !self.context.is_outbound() {
6467 panic!("Tried to create outbound funding_created message on an inbound channel!");
6470 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
6471 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
6473 panic!("Tried to get a funding_created message at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
6475 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
6476 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
6477 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6478 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
6481 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
6482 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
6484 // Now that we're past error-generating stuff, update our local state:
6486 self.context.channel_state = ChannelState::FundingNegotiated;
6487 self.context.channel_id = funding_txo.to_channel_id();
6489 // If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
6490 // We can skip this if it is a zero-conf channel.
6491 if funding_transaction.is_coin_base() &&
6492 self.context.minimum_depth.unwrap_or(0) > 0 &&
6493 self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
6494 self.context.minimum_depth = Some(COINBASE_MATURITY);
6497 self.context.funding_transaction = Some(funding_transaction);
6498 self.context.is_batch_funding = Some(()).filter(|_| is_batch_funding);
6500 let funding_created = self.get_funding_created_msg(logger);
6501 if funding_created.is_none() {
6502 #[cfg(not(async_signing))] {
6503 panic!("Failed to get signature for new funding creation");
6505 #[cfg(async_signing)] {
6506 if !self.context.signer_pending_funding {
6507 log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
6508 self.context.signer_pending_funding = true;
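// Illustrative note: per the doc comment above, the funding transaction must NOT be broadcast
// at this point. The returned `msgs::FundingCreated` (when available) is relayed to the peer;
// with `async_signing` enabled, a missing message here simply means it will be produced later
// via `signer_maybe_unblocked` once the signer delivers the counterparty commitment signature.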
6516 fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
6517 // The default channel type (ie the first one we try) depends on whether the channel is
6518 // public - if it is, we just go with `only_static_remotekey` as it's the only option
6519 // available. If it's private, we first try `scid_privacy` as it provides better privacy
6520 // with no other changes, and fall back to `only_static_remotekey`.
6521 let mut ret = ChannelTypeFeatures::only_static_remote_key();
6522 if !config.channel_handshake_config.announced_channel &&
6523 config.channel_handshake_config.negotiate_scid_privacy &&
6524 their_features.supports_scid_privacy() {
6525 ret.set_scid_privacy_required();
6528 // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
6529 // set it now. If they don't understand it, we'll fall back to our default of
6530 // `only_static_remotekey`.
6531 if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
6532 their_features.supports_anchors_zero_fee_htlc_tx() {
6533 ret.set_anchors_zero_fee_htlc_tx_required();
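// For orientation: combined with `maybe_handle_error_without_close` below, the negotiation
// order works out to: start from `static_remote_key` plus whichever of `scid_privacy` and
// `anchors_zero_fee_htlc_tx` both our config and the peer's init features allow, then on a
// peer error retry after stripping `anchors_zero_fee_htlc_tx`, then `scid_privacy`, and
// finally plain `static_remote_key` before giving up on the channel.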
6539 /// If we receive an error message, it may only be a rejection of the channel type we tried,
6540 /// not of our ability to open any channel at all. Thus, on error, we should first call this
6541 /// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
6542 pub(crate) fn maybe_handle_error_without_close<F: Deref>(
6543 &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F>
6544 ) -> Result<msgs::OpenChannel, ()>
6546 F::Target: FeeEstimator
6548 if !self.context.is_outbound() ||
6550 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
6551 if flags == NegotiatingFundingFlags::OUR_INIT_SENT
6556 if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() {
6557 // We've exhausted our options
6560 // We support opening a few different types of channels. Try removing our additional
6561 // features one by one until we've either arrived at our default or the counterparty has accepted.
6564 // Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
6565 // counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
6566 // checks whether the counterparty supports every feature, this would only happen if the
6567 // counterparty is advertising the feature, but rejecting channels proposing the feature for some reason.
6569 if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
6570 self.context.channel_type.clear_anchors_zero_fee_htlc_tx();
6571 self.context.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
6572 assert!(!self.context.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
6573 } else if self.context.channel_type.supports_scid_privacy() {
6574 self.context.channel_type.clear_scid_privacy();
6576 self.context.channel_type = ChannelTypeFeatures::only_static_remote_key();
6578 self.context.channel_transaction_parameters.channel_type_features = self.context.channel_type.clone();
6579 Ok(self.get_open_channel(chain_hash))
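// Illustrative sketch only (`resend_open_channel` is a placeholder, not an LDK API): on
// receiving a peer `error` message for a not-yet-funded outbound channel, a caller can retry
// with a less demanding channel type instead of failing outright:
//
//     match chan.maybe_handle_error_without_close(chain_hash, &fee_estimator) {
//         Ok(open_channel_msg) => resend_open_channel(open_channel_msg),
//         Err(()) => { /* no fallback left; the channel should be abandoned */ }
//     }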
6582 pub fn get_open_channel(&self, chain_hash: ChainHash) -> msgs::OpenChannel {
6583 if !self.context.is_outbound() {
6584 panic!("Tried to open a channel for an inbound channel?");
6586 if self.context.have_received_message() {
6587 panic!("Cannot generate an open_channel after we've moved forward");
6590 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6591 panic!("Tried to send an open_channel for a channel that has already advanced");
6594 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
6595 let keys = self.context.get_holder_pubkeys();
6599 temporary_channel_id: self.context.channel_id,
6600 funding_satoshis: self.context.channel_value_satoshis,
6601 push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat,
6602 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
6603 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
6604 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
6605 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
6606 feerate_per_kw: self.context.feerate_per_kw as u32,
6607 to_self_delay: self.context.get_holder_selected_contest_delay(),
6608 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
6609 funding_pubkey: keys.funding_pubkey,
6610 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
6611 payment_point: keys.payment_point,
6612 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
6613 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
6614 first_per_commitment_point,
6615 channel_flags: if self.context.config.announced_channel {1} else {0},
6616 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
6617 Some(script) => script.clone().into_inner(),
6618 None => Builder::new().into_script(),
6620 channel_type: Some(self.context.channel_type.clone()),
6625 pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
6626 let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };
6628 // Check sanity of message fields:
6629 if !self.context.is_outbound() {
6630 return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
6632 if !matches!(self.context.channel_state, ChannelState::NegotiatingFunding(flags) if flags == NegotiatingFundingFlags::OUR_INIT_SENT) {
6633 return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
6635 if msg.dust_limit_satoshis > 21000000 * 100000000 {
6636 return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.dust_limit_satoshis)));
6638 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
6639 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
6641 if msg.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
6642 return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
6644 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
6645 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
6646 msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
6648 let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
6649 if msg.htlc_minimum_msat >= full_channel_value_msat {
6650 return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
6652 let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
6653 if msg.to_self_delay > max_delay_acceptable {
6654 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.to_self_delay)));
6656 if msg.max_accepted_htlcs < 1 {
6657 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
6659 if msg.max_accepted_htlcs > MAX_HTLCS {
6660 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
6663 // Now check against optional parameters as set by config...
6664 if msg.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
6665 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
6667 if msg.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
6668 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
6670 if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
6671 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
6673 if msg.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
6674 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
6676 if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6677 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6679 if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
6680 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
6682 if msg.minimum_depth > peer_limits.max_minimum_depth {
6683 return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.minimum_depth)));
6686 if let Some(ty) = &msg.channel_type {
6687 if *ty != self.context.channel_type {
6688 return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
6690 } else if their_features.supports_channel_type() {
6691 // Assume they've accepted the channel type as they said they understand it.
6693 let channel_type = ChannelTypeFeatures::from_init(&their_features);
6694 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
6695 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
6697 self.context.channel_type = channel_type.clone();
6698 self.context.channel_transaction_parameters.channel_type_features = channel_type;
6701 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
6702 match &msg.shutdown_scriptpubkey {
6703 &Some(ref script) => {
6704 // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
6705 if script.len() == 0 {
6708 if !script::is_bolt2_compliant(&script, their_features) {
6709 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
6711 Some(script.clone())
6714 // Peer is signaling upfront_shutdown but didn't opt out with the correct mechanism (i.e. a 0-length script). Peer looks buggy; we fail the channel
6716 return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
6721 self.context.counterparty_dust_limit_satoshis = msg.dust_limit_satoshis;
6722 self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
6723 self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
6724 self.context.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat;
6725 self.context.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs;
6727 if peer_limits.trust_own_funding_0conf {
6728 self.context.minimum_depth = Some(msg.minimum_depth);
6730 self.context.minimum_depth = Some(cmp::max(1, msg.minimum_depth));
6733 let counterparty_pubkeys = ChannelPublicKeys {
6734 funding_pubkey: msg.funding_pubkey,
6735 revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
6736 payment_point: msg.payment_point,
6737 delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
6738 htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
6741 self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
6742 selected_contest_delay: msg.to_self_delay,
6743 pubkeys: counterparty_pubkeys,
6746 self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point);
6747 self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
6749 self.context.channel_state = ChannelState::NegotiatingFunding(
6750 NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
6752 self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
6757 /// Handles a funding_signed message from the remote end.
6758 /// If this call is successful, broadcast the funding transaction (and not before!)
6759 pub fn funding_signed<L: Deref>(
6760 mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
6761 ) -> Result<(Channel<SP>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (OutboundV1Channel<SP>, ChannelError)>
6765 if !self.context.is_outbound() {
6766 return Err((self, ChannelError::Close("Received funding_signed for an inbound channel?".to_owned())));
6768 if !matches!(self.context.channel_state, ChannelState::FundingNegotiated) {
6769 return Err((self, ChannelError::Close("Received funding_signed in strange state!".to_owned())));
6771 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
6772 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
6773 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6774 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
6777 let funding_script = self.context.get_funding_redeemscript();
6779 let counterparty_keys = self.context.build_remote_transaction_keys();
6780 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
6781 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
6782 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
6784 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
6785 &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
6787 let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
6788 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
6790 let trusted_tx = initial_commitment_tx.trust();
6791 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
6792 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
6793 // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
6794 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
6795 return Err((self, ChannelError::Close("Invalid funding_signed signature from peer".to_owned())));
6799 let holder_commitment_tx = HolderCommitmentTransaction::new(
6800 initial_commitment_tx,
6803 &self.context.get_holder_pubkeys().funding_pubkey,
6804 self.context.counterparty_funding_pubkey()
6808 self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new());
6809 if validated.is_err() {
6810 return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
6813 let funding_redeemscript = self.context.get_funding_redeemscript();
6814 let funding_txo = self.context.get_funding_txo().unwrap();
6815 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
6816 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
6817 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
6818 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
6819 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
6820 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
6821 shutdown_script, self.context.get_holder_selected_contest_delay(),
6822 &self.context.destination_script, (funding_txo, funding_txo_script),
6823 &self.context.channel_transaction_parameters,
6824 funding_redeemscript.clone(), self.context.channel_value_satoshis,
6826 holder_commitment_tx, best_block, self.context.counterparty_node_id);
6827 channel_monitor.provide_initial_counterparty_commitment_tx(
6828 counterparty_initial_bitcoin_tx.txid, Vec::new(),
6829 self.context.cur_counterparty_commitment_transaction_number,
6830 self.context.counterparty_cur_commitment_point.unwrap(),
6831 counterparty_initial_commitment_tx.feerate_per_kw(),
6832 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
6833 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
6835 assert!(!self.context.channel_state.is_monitor_update_in_progress()); // We have not had any monitor(s) yet to fail an update!
6836 if self.context.is_batch_funding() {
6837 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH);
6839 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
6841 self.context.cur_holder_commitment_transaction_number -= 1;
6842 self.context.cur_counterparty_commitment_transaction_number -= 1;
6844 log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
6846 let mut channel = Channel { context: self.context };
6848 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
6849 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
6850 Ok((channel, channel_monitor))
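// Illustrative sketch of the caller's obligations after `funding_signed` (handler names are
// placeholders; the real orchestration lives in `ChannelManager`):
//
//     match outbound_chan.funding_signed(&msg, best_block, &signer_provider, &logger) {
//         Ok((funded_channel, channel_monitor)) => {
//             persist_monitor(channel_monitor);   // make the monitor durable first
//             broadcast_funding_transaction();    // only now is broadcasting safe
//         }
//         Err((still_unfunded_chan, err)) => handle_error(still_unfunded_chan, err),
//     }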
6853 /// Indicates that the signer may have some signatures for us, so we should retry if we're blocked waiting on one.
6855 #[cfg(async_signing)]
6856 pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
6857 if self.context.signer_pending_funding && self.context.is_outbound() {
6858 log_trace!(logger, "Signer unblocked a funding_created");
6859 self.get_funding_created_msg(logger)
6864 /// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
6865 pub(super) struct InboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
6866 pub context: ChannelContext<SP>,
6867 pub unfunded_context: UnfundedChannelContext,
6870 impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
6871 /// Creates a new channel from a remote side's request for one.
6872 /// Assumes chain_hash has already been checked and corresponds with what we expect!
6873 pub fn new<ES: Deref, F: Deref, L: Deref>(
6874 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
6875 counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
6876 their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
6877 current_chain_height: u32, logger: &L, is_0conf: bool,
6878 ) -> Result<InboundV1Channel<SP>, ChannelError>
6879 where ES::Target: EntropySource,
6880 F::Target: FeeEstimator,
6883 let logger = WithContext::from(logger, Some(counterparty_node_id), Some(msg.temporary_channel_id));
6884 let announced_channel = (msg.channel_flags & 1) == 1;
6886 // First check the channel type is known, failing before we do anything else if we don't
6887 // support this channel type.
6888 let channel_type = if let Some(channel_type) = &msg.channel_type {
6889 if channel_type.supports_any_optional_bits() {
6890 return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
6893 // We only support the channel types defined by the `ChannelManager` in
6894 // `provided_channel_type_features`. The channel type must always support
6895 // `static_remote_key`.
6896 if !channel_type.requires_static_remote_key() {
6897 return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
6899 // Make sure we support all of the features behind the channel type.
6900 if !channel_type.is_subset(our_supported_features) {
6901 return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
6903 if channel_type.requires_scid_privacy() && announced_channel {
6904 return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
6906 channel_type.clone()
6908 let channel_type = ChannelTypeFeatures::from_init(&their_features);
6909 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
6910 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
6915 let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id);
6916 let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id);
6917 let pubkeys = holder_signer.pubkeys().clone();
6918 let counterparty_pubkeys = ChannelPublicKeys {
6919 funding_pubkey: msg.funding_pubkey,
6920 revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
6921 payment_point: msg.payment_point,
6922 delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
6923 htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
6926 if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
6927 return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
6930 // Check sanity of message fields:
6931 if msg.funding_satoshis > config.channel_handshake_limits.max_funding_satoshis {
6932 return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.channel_handshake_limits.max_funding_satoshis, msg.funding_satoshis)));
6934 if msg.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
6935 return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.funding_satoshis)));
6937 if msg.channel_reserve_satoshis > msg.funding_satoshis {
6938 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.funding_satoshis)));
6940 let full_channel_value_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000;
6941 if msg.push_msat > full_channel_value_msat {
6942 return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg.push_msat, full_channel_value_msat)));
6944 if msg.dust_limit_satoshis > msg.funding_satoshis {
6945 return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than funding_satoshis {}. Peer never wants payout outputs?", msg.dust_limit_satoshis, msg.funding_satoshis)));
6947 if msg.htlc_minimum_msat >= full_channel_value_msat {
6948 return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
6950 Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, msg.feerate_per_kw, None, &&logger)?;
6952 let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
6953 if msg.to_self_delay > max_counterparty_selected_contest_delay {
6954 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, msg.to_self_delay)));
6956 if msg.max_accepted_htlcs < 1 {
6957 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
6959 if msg.max_accepted_htlcs > MAX_HTLCS {
6960 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
6963 // Now check against optional parameters as set by config...
6964 if msg.funding_satoshis < config.channel_handshake_limits.min_funding_satoshis {
6965 return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", msg.funding_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
6967 if msg.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
6968 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
6970 if msg.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
6971 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
6973 if msg.channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
6974 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
6976 if msg.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
6977 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
6979 if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6980 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6982 if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
6983 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
6986 // Convert things into internal flags and prep our state:
6988 if config.channel_handshake_limits.force_announced_channel_preference {
6989 if config.channel_handshake_config.announced_channel != announced_channel {
6990 return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
6994 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis, config);
6995 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6996 // Protocol level safety check in place, although it should never happen because
6997 // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
6998 return Err(ChannelError::Close(format!("Suitable channel reserve not found. The channel reserve we require the remote to keep ({}) is below the implementation dust limit ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
7000 if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
7001 return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg.push_msat)));
7003 if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
7004 log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
7005 msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
7007 if holder_selected_channel_reserve_satoshis < msg.dust_limit_satoshis {
7008 return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
7011 // check if the funder's amount for the initial commitment tx is sufficient
7012 // for full fee payment plus a few HTLCs to ensure the channel will be useful.
7013 let anchor_outputs_value = if channel_type.supports_anchors_zero_fee_htlc_tx() {
7014 ANCHOR_OUTPUT_VALUE_SATOSHI * 2
7018 let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat;
7019 let commitment_tx_fee = commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000;
7020 if (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value) < commitment_tx_fee {
7021 return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even cover the initial commitment transaction fee of {} sats.", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee)));
7024 let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee - anchor_outputs_value;
7025 // While it's reasonable for us to not meet the channel reserve initially (if they don't
7026 // want to push much to us), our counterparty should always have more than our reserve.
7027 if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
7028 return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
7031 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
7032 match &msg.shutdown_scriptpubkey {
7033 &Some(ref script) => {
7034 // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
7035 if script.len() == 0 {
7038 if !script::is_bolt2_compliant(&script, their_features) {
7039 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
7041 Some(script.clone())
7044 // Peer is signaling upfront_shutdown but didn't opt out with the correct mechanism (i.e. a 0-length script). Peer looks buggy; we fail the channel
7046 return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
7051 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
7052 match signer_provider.get_shutdown_scriptpubkey() {
7053 Ok(scriptpubkey) => Some(scriptpubkey),
7054 Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
7058 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
7059 if !shutdown_scriptpubkey.is_compatible(&their_features) {
7060 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
7064 let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
7065 Ok(script) => script,
7066 Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
7069 let mut secp_ctx = Secp256k1::new();
7070 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
7072 let minimum_depth = if is_0conf {
7075 Some(cmp::max(config.channel_handshake_config.minimum_depth, 1))
7079 context: ChannelContext {
7082 config: LegacyChannelConfig {
7083 options: config.channel_config.clone(),
7085 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
7090 inbound_handshake_limits_override: None,
7092 temporary_channel_id: Some(msg.temporary_channel_id),
7093 channel_id: msg.temporary_channel_id,
7094 channel_state: ChannelState::NegotiatingFunding(
7095 NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
7097 announcement_sigs_state: AnnouncementSigsState::NotSent,
7100 latest_monitor_update_id: 0,
7102 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
7103 shutdown_scriptpubkey,
7106 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
7107 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
7108 value_to_self_msat: msg.push_msat,
7110 pending_inbound_htlcs: Vec::new(),
7111 pending_outbound_htlcs: Vec::new(),
7112 holding_cell_htlc_updates: Vec::new(),
7113 pending_update_fee: None,
7114 holding_cell_update_fee: None,
7115 next_holder_htlc_id: 0,
7116 next_counterparty_htlc_id: 0,
7117 update_time_counter: 1,
7119 resend_order: RAACommitmentOrder::CommitmentFirst,
7121 monitor_pending_channel_ready: false,
7122 monitor_pending_revoke_and_ack: false,
7123 monitor_pending_commitment_signed: false,
7124 monitor_pending_forwards: Vec::new(),
7125 monitor_pending_failures: Vec::new(),
7126 monitor_pending_finalized_fulfills: Vec::new(),
7128 signer_pending_commitment_update: false,
7129 signer_pending_funding: false,
7131 #[cfg(debug_assertions)]
7132 holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
7133 #[cfg(debug_assertions)]
7134 counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
7136 last_sent_closing_fee: None,
7137 pending_counterparty_closing_signed: None,
7138 expecting_peer_commitment_signed: false,
7139 closing_fee_limits: None,
7140 target_closing_feerate_sats_per_kw: None,
7142 funding_tx_confirmed_in: None,
7143 funding_tx_confirmation_height: 0,
7144 short_channel_id: None,
7145 channel_creation_height: current_chain_height,
7147 feerate_per_kw: msg.feerate_per_kw,
7148 channel_value_satoshis: msg.funding_satoshis,
7149 counterparty_dust_limit_satoshis: msg.dust_limit_satoshis,
7150 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
7151 counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000),
7152 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config),
7153 counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis),
7154 holder_selected_channel_reserve_satoshis,
7155 counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
7156 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
7157 counterparty_max_accepted_htlcs: msg.max_accepted_htlcs,
7158 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
7161 counterparty_forwarding_info: None,
7163 channel_transaction_parameters: ChannelTransactionParameters {
7164 holder_pubkeys: pubkeys,
7165 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
7166 is_outbound_from_holder: false,
7167 counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
7168 selected_contest_delay: msg.to_self_delay,
7169 pubkeys: counterparty_pubkeys,
7171 funding_outpoint: None,
7172 channel_type_features: channel_type.clone()
7174 funding_transaction: None,
7175 is_batch_funding: None,
7177 counterparty_cur_commitment_point: Some(msg.first_per_commitment_point),
7178 counterparty_prev_commitment_point: None,
7179 counterparty_node_id,
7181 counterparty_shutdown_scriptpubkey,
7183 commitment_secrets: CounterpartyCommitmentSecrets::new(),
7185 channel_update_status: ChannelUpdateStatus::Enabled,
7186 closing_signed_in_flight: false,
7188 announcement_sigs: None,
7190 #[cfg(any(test, fuzzing))]
7191 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
7192 #[cfg(any(test, fuzzing))]
7193 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
7195 workaround_lnd_bug_4006: None,
7196 sent_message_awaiting_response: None,
7198 latest_inbound_scid_alias: None,
7199 outbound_scid_alias: 0,
7201 channel_pending_event_emitted: false,
7202 channel_ready_event_emitted: false,
7204 #[cfg(any(test, fuzzing))]
7205 historical_inbound_htlc_fulfills: HashSet::new(),
7210 blocked_monitor_updates: Vec::new(),
7212 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
7218 /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
7219 /// should be sent back to the counterparty node.
7221 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7222 pub fn accept_inbound_channel(&mut self) -> msgs::AcceptChannel {
7223 if self.context.is_outbound() {
7224 panic!("Tried to send accept_channel for an outbound channel?");
7227 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
7228 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
7230 panic!("Tried to send accept_channel after channel had moved forward");
7232 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7233 panic!("Tried to send an accept_channel for a channel that has already advanced");
7236 self.generate_accept_channel_message()
7239 /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
7240 /// inbound channel. If the intention is to accept an inbound channel, use
7241 /// [`InboundV1Channel::accept_inbound_channel`] instead.
7243 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7244 fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
7245 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
7246 let keys = self.context.get_holder_pubkeys();
7248 msgs::AcceptChannel {
7249 temporary_channel_id: self.context.channel_id,
7250 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
7251 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
7252 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
7253 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
7254 minimum_depth: self.context.minimum_depth.unwrap(),
7255 to_self_delay: self.context.get_holder_selected_contest_delay(),
7256 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
7257 funding_pubkey: keys.funding_pubkey,
7258 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
7259 payment_point: keys.payment_point,
7260 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
7261 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
7262 first_per_commitment_point,
7263 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
7264 Some(script) => script.clone().into_inner(),
7265 None => Builder::new().into_script(),
7267 channel_type: Some(self.context.channel_type.clone()),
7269 next_local_nonce: None,
7273 /// Enables the possibility for tests to extract a [`msgs::AcceptChannel`] message for an
7274 /// inbound channel without accepting it.
7276 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7278 pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel {
7279 self.generate_accept_channel_message()
7282 fn check_funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<CommitmentTransaction, ChannelError> where L::Target: Logger {
7283 let funding_script = self.context.get_funding_redeemscript();
7285 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
7286 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
7287 let trusted_tx = initial_commitment_tx.trust();
7288 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
7289 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
7290 // They sign the holder commitment transaction...
7291 log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
7292 log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
7293 encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
7294 encode::serialize_hex(&funding_script), &self.context.channel_id());
7295 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
7297 Ok(initial_commitment_tx)
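// Illustrative note (not part of the original source): the check above verifies the
// counterparty's signature over the initial holder commitment transaction, which spends
// the 2-of-2 funding output. Per BOLT 3, the redeemscript produced by
// `make_funding_redeemscript` is, roughly:
//
//     OP_2 <lesser_funding_pubkey> <greater_funding_pubkey> OP_2 OP_CHECKMULTISIG
//
// with the two funding pubkeys ordered lexicographically, and the sighash is computed
// as SIGHASH_ALL over that script and the full channel value.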
7300 pub fn funding_created<L: Deref>(
7301 mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
7302 ) -> Result<(Channel<SP>, Option<msgs::FundingSigned>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (Self, ChannelError)>
7306 if self.context.is_outbound() {
7307 return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned())));
7310 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
7311 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
7313 // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
7314 // remember the channel, so it's safe to just send an error_message here and drop the channel.
7316 return Err((self, ChannelError::Close("Received funding_created after we got the channel!".to_owned())));
7318 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
7319 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
7320 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7321 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
7324 let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
7325 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
7326 // This is an externally observable change before we finish all our checks. In particular
7327 // check_funding_created_signature may fail.
7328 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
7330 let initial_commitment_tx = match self.check_funding_created_signature(&msg.signature, logger) {
7332 Err(ChannelError::Close(e)) => {
7333 self.context.channel_transaction_parameters.funding_outpoint = None;
7334 return Err((self, ChannelError::Close(e)));
7337 // The only error we know how to handle is ChannelError::Close, so we fall over here
7338 // to make sure we don't continue with an inconsistent state.
7339 panic!("unexpected error type from check_funding_created_signature {:?}", e);
7343 let holder_commitment_tx = HolderCommitmentTransaction::new(
7344 initial_commitment_tx,
7347 &self.context.get_holder_pubkeys().funding_pubkey,
7348 self.context.counterparty_funding_pubkey()
7351 if let Err(_) = self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new()) {
7352 return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
7355 // Now that we're past error-generating stuff, update our local state:
7357 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
7358 self.context.channel_id = funding_txo.to_channel_id();
7359 self.context.cur_counterparty_commitment_transaction_number -= 1;
7360 self.context.cur_holder_commitment_transaction_number -= 1;
7362 let (counterparty_initial_commitment_tx, funding_signed) = self.context.get_funding_signed_msg(logger);
7364 let funding_redeemscript = self.context.get_funding_redeemscript();
7365 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
7366 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
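// Illustrative note (an assumption based on BOLT 3, not part of the original source): the
// obscure factor is derived from the lower 48 bits of
// SHA256(opener_payment_basepoint || acceptor_payment_basepoint), and each commitment
// transaction encodes `commitment_number ^ obscure_factor` across its locktime and input
// sequence fields, so on-chain observers cannot trivially count channel state updates.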
7367 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
7368 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
7369 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
7370 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
7371 shutdown_script, self.context.get_holder_selected_contest_delay(),
7372 &self.context.destination_script, (funding_txo, funding_txo_script.clone()),
7373 &self.context.channel_transaction_parameters,
7374 funding_redeemscript.clone(), self.context.channel_value_satoshis,
7376 holder_commitment_tx, best_block, self.context.counterparty_node_id);
7377 channel_monitor.provide_initial_counterparty_commitment_tx(
7378 counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
7379 self.context.cur_counterparty_commitment_transaction_number + 1,
7380 self.context.counterparty_cur_commitment_point.unwrap(), self.context.feerate_per_kw,
7381 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
7382 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
7384 log_info!(logger, "{} funding_signed for peer for channel {}",
7385 if funding_signed.is_some() { "Generated" } else { "Waiting for signature on" }, &self.context.channel_id());
7387 // Promote the channel to a full-fledged one now that we have updated the state and have a
7388 // `ChannelMonitor`.
7389 let mut channel = Channel {
7390 context: self.context,
7392 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
7393 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
7395 Ok((channel, funding_signed, channel_monitor))
7399 const SERIALIZATION_VERSION: u8 = 3;
7400 const MIN_SERIALIZATION_VERSION: u8 = 3;
7402 impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
7408 impl Writeable for ChannelUpdateStatus {
7409 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7410 // We only care about writing out the current state as it was announced, i.e. only either
7411 // Enabled or Disabled. In the case of DisabledStaged, we most recently announced the
7412 // channel as enabled, so we write 0. For EnabledStaged, we similarly write a 1.
7414 ChannelUpdateStatus::Enabled => 0u8.write(writer)?,
7415 ChannelUpdateStatus::DisabledStaged(_) => 0u8.write(writer)?,
7416 ChannelUpdateStatus::EnabledStaged(_) => 1u8.write(writer)?,
7417 ChannelUpdateStatus::Disabled => 1u8.write(writer)?,
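// Illustrative note (not part of the original source): combined with the `Readable`
// implementation below, the staged states round-trip to their last-announced base state,
// e.g. `DisabledStaged` serializes as 0 and deserializes as `Enabled`.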
7423 impl Readable for ChannelUpdateStatus {
7424 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
7425 Ok(match <u8 as Readable>::read(reader)? {
7426 0 => ChannelUpdateStatus::Enabled,
7427 1 => ChannelUpdateStatus::Disabled,
7428 _ => return Err(DecodeError::InvalidValue),
7433 impl Writeable for AnnouncementSigsState {
7434 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7435 // We only care about writing out the current state as if we had just disconnected, at
7436 // which point we always set anything but PeerReceived to NotSent.
7438 AnnouncementSigsState::NotSent => 0u8.write(writer),
7439 AnnouncementSigsState::MessageSent => 0u8.write(writer),
7440 AnnouncementSigsState::Committed => 0u8.write(writer),
7441 AnnouncementSigsState::PeerReceived => 1u8.write(writer),
7446 impl Readable for AnnouncementSigsState {
7447 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
7448 Ok(match <u8 as Readable>::read(reader)? {
7449 0 => AnnouncementSigsState::NotSent,
7450 1 => AnnouncementSigsState::PeerReceived,
7451 _ => return Err(DecodeError::InvalidValue),
7456 impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
7457 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7458 // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been called.
7461 write_ver_prefix!(writer, MIN_SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
7463 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7464 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write
7465 // the low bytes now and the optional high bytes later.
7466 let user_id_low = self.context.user_id as u64;
7467 user_id_low.write(writer)?;
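// Illustrative sketch (not part of the original source): e.g. a `user_id` of
// (1u128 << 64) | 2 is written as `user_id_low = 2` here and `user_id_high_opt = Some(1)`
// in the TLV section below, and is rebuilt on read as
// `user_id_low as u128 + ((user_id_high as u128) << 64)`.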
7469 // Version 1 deserializers expected to read parts of the config object here. Version 2
7470 // deserializers (0.0.99) now read config through TLVs, and as we now require them for
7471 // `minimum_depth` we simply write dummy values here.
7472 writer.write_all(&[0; 8])?;
7474 self.context.channel_id.write(writer)?;
7476 let mut channel_state = self.context.channel_state;
7477 if matches!(channel_state, ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)) {
7478 channel_state.set_peer_disconnected();
7480 channel_state.to_u32().write(writer)?;
7482 self.context.channel_value_satoshis.write(writer)?;
7484 self.context.latest_monitor_update_id.write(writer)?;
7486 // Write out the old serialization for shutdown_pubkey for backwards compatibility, if
7487 // deserialized from that format.
7488 match self.context.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) {
7489 Some(shutdown_pubkey) => shutdown_pubkey.write(writer)?,
7490 None => [0u8; PUBLIC_KEY_SIZE].write(writer)?,
7492 self.context.destination_script.write(writer)?;
7494 self.context.cur_holder_commitment_transaction_number.write(writer)?;
7495 self.context.cur_counterparty_commitment_transaction_number.write(writer)?;
7496 self.context.value_to_self_msat.write(writer)?;
7498 let mut dropped_inbound_htlcs = 0;
7499 for htlc in self.context.pending_inbound_htlcs.iter() {
7500 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
7501 dropped_inbound_htlcs += 1;
7504 (self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?;
7505 for htlc in self.context.pending_inbound_htlcs.iter() {
7506 if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state {
7509 htlc.htlc_id.write(writer)?;
7510 htlc.amount_msat.write(writer)?;
7511 htlc.cltv_expiry.write(writer)?;
7512 htlc.payment_hash.write(writer)?;
7514 &InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
7515 &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_state) => {
7517 htlc_state.write(writer)?;
7519 &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_state) => {
7521 htlc_state.write(writer)?;
7523 &InboundHTLCState::Committed => {
7526 &InboundHTLCState::LocalRemoved(ref removal_reason) => {
7528 removal_reason.write(writer)?;
7533 let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];
7534 let mut pending_outbound_skimmed_fees: Vec<Option<u64>> = Vec::new();
7535 let mut pending_outbound_blinding_points: Vec<Option<PublicKey>> = Vec::new();
7537 (self.context.pending_outbound_htlcs.len() as u64).write(writer)?;
7538 for htlc in self.context.pending_outbound_htlcs.iter() {
7539 htlc.htlc_id.write(writer)?;
7540 htlc.amount_msat.write(writer)?;
7541 htlc.cltv_expiry.write(writer)?;
7542 htlc.payment_hash.write(writer)?;
7543 htlc.source.write(writer)?;
7545 &OutboundHTLCState::LocalAnnounced(ref onion_packet) => {
7547 onion_packet.write(writer)?;
7549 &OutboundHTLCState::Committed => {
7552 &OutboundHTLCState::RemoteRemoved(_) => {
7553 // Treat this as a Committed because we haven't received the CS - they'll
7554 // resend the claim/fail on reconnect, along with the (currently) missing CS.
7557 &OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref outcome) => {
7559 if let OutboundHTLCOutcome::Success(preimage) = outcome {
7560 preimages.push(preimage);
7562 let reason: Option<&HTLCFailReason> = outcome.into();
7563 reason.write(writer)?;
7565 &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) => {
7567 if let OutboundHTLCOutcome::Success(preimage) = outcome {
7568 preimages.push(preimage);
7570 let reason: Option<&HTLCFailReason> = outcome.into();
7571 reason.write(writer)?;
7574 pending_outbound_skimmed_fees.push(htlc.skimmed_fee_msat);
7575 pending_outbound_blinding_points.push(htlc.blinding_point);
7578 let mut holding_cell_skimmed_fees: Vec<Option<u64>> = Vec::new();
7579 let mut holding_cell_blinding_points: Vec<Option<PublicKey>> = Vec::new();
7580 // Vec of (htlc_id, failure_code, sha256_of_onion)
7581 let mut malformed_htlcs: Vec<(u64, u16, [u8; 32])> = Vec::new();
7582 (self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
7583 for update in self.context.holding_cell_htlc_updates.iter() {
7585 &HTLCUpdateAwaitingACK::AddHTLC {
7586 ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
7587 blinding_point, skimmed_fee_msat,
7590 amount_msat.write(writer)?;
7591 cltv_expiry.write(writer)?;
7592 payment_hash.write(writer)?;
7593 source.write(writer)?;
7594 onion_routing_packet.write(writer)?;
7596 holding_cell_skimmed_fees.push(skimmed_fee_msat);
7597 holding_cell_blinding_points.push(blinding_point);
7599 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
7601 payment_preimage.write(writer)?;
7602 htlc_id.write(writer)?;
7604 &HTLCUpdateAwaitingACK::FailHTLC { ref htlc_id, ref err_packet } => {
7606 htlc_id.write(writer)?;
7607 err_packet.write(writer)?;
7609 &HTLCUpdateAwaitingACK::FailMalformedHTLC {
7610 htlc_id, failure_code, sha256_of_onion
7612 // We don't want to break downgrading by adding a new variant, so write a dummy
7613 // `::FailHTLC` variant and write the real malformed error as an optional TLV.
7614 malformed_htlcs.push((htlc_id, failure_code, sha256_of_onion));
7616 let dummy_err_packet = msgs::OnionErrorPacket { data: Vec::new() };
7618 htlc_id.write(writer)?;
7619 dummy_err_packet.write(writer)?;
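// Illustrative note (not part of the original source): on the read side below, the
// odd-typed TLV `(43, malformed_htlcs, optional_vec)` is used to locate these dummy
// `FailHTLC` entries by `htlc_id` and replace them with the real `FailMalformedHTLC`
// updates, while downgraded readers simply see a failure with an empty error packet.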
7624 match self.context.resend_order {
7625 RAACommitmentOrder::CommitmentFirst => 0u8.write(writer)?,
7626 RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?,
7629 self.context.monitor_pending_channel_ready.write(writer)?;
7630 self.context.monitor_pending_revoke_and_ack.write(writer)?;
7631 self.context.monitor_pending_commitment_signed.write(writer)?;
7633 (self.context.monitor_pending_forwards.len() as u64).write(writer)?;
7634 for &(ref pending_forward, ref htlc_id) in self.context.monitor_pending_forwards.iter() {
7635 pending_forward.write(writer)?;
7636 htlc_id.write(writer)?;
7639 (self.context.monitor_pending_failures.len() as u64).write(writer)?;
7640 for &(ref htlc_source, ref payment_hash, ref fail_reason) in self.context.monitor_pending_failures.iter() {
7641 htlc_source.write(writer)?;
7642 payment_hash.write(writer)?;
7643 fail_reason.write(writer)?;
7646 if self.context.is_outbound() {
7647 self.context.pending_update_fee.map(|(a, _)| a).write(writer)?;
7648 } else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.context.pending_update_fee {
7649 Some(feerate).write(writer)?;
7651 // As for inbound HTLCs, if the update was only announced and never committed in a
7652 // commitment_signed, drop it.
7653 None::<u32>.write(writer)?;
7655 self.context.holding_cell_update_fee.write(writer)?;
7657 self.context.next_holder_htlc_id.write(writer)?;
7658 (self.context.next_counterparty_htlc_id - dropped_inbound_htlcs).write(writer)?;
7659 self.context.update_time_counter.write(writer)?;
7660 self.context.feerate_per_kw.write(writer)?;
7662 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7663 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7664 // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7665 // consider the stale state on reload.
7668 self.context.funding_tx_confirmed_in.write(writer)?;
7669 self.context.funding_tx_confirmation_height.write(writer)?;
7670 self.context.short_channel_id.write(writer)?;
7672 self.context.counterparty_dust_limit_satoshis.write(writer)?;
7673 self.context.holder_dust_limit_satoshis.write(writer)?;
7674 self.context.counterparty_max_htlc_value_in_flight_msat.write(writer)?;
7676 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7677 self.context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0).write(writer)?;
7679 self.context.counterparty_htlc_minimum_msat.write(writer)?;
7680 self.context.holder_htlc_minimum_msat.write(writer)?;
7681 self.context.counterparty_max_accepted_htlcs.write(writer)?;
7683 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7684 self.context.minimum_depth.unwrap_or(0).write(writer)?;
7686 match &self.context.counterparty_forwarding_info {
7689 info.fee_base_msat.write(writer)?;
7690 info.fee_proportional_millionths.write(writer)?;
7691 info.cltv_expiry_delta.write(writer)?;
7693 None => 0u8.write(writer)?
7696 self.context.channel_transaction_parameters.write(writer)?;
7697 self.context.funding_transaction.write(writer)?;
7699 self.context.counterparty_cur_commitment_point.write(writer)?;
7700 self.context.counterparty_prev_commitment_point.write(writer)?;
7701 self.context.counterparty_node_id.write(writer)?;
7703 self.context.counterparty_shutdown_scriptpubkey.write(writer)?;
7705 self.context.commitment_secrets.write(writer)?;
7707 self.context.channel_update_status.write(writer)?;
7709 #[cfg(any(test, fuzzing))]
7710 (self.context.historical_inbound_htlc_fulfills.len() as u64).write(writer)?;
7711 #[cfg(any(test, fuzzing))]
7712 for htlc in self.context.historical_inbound_htlc_fulfills.iter() {
7713 htlc.write(writer)?;
7716 // If the channel type is something other than only-static-remote-key, then we need to have
7717 // older clients fail to deserialize this channel at all. If the type is
7718 // only-static-remote-key, we simply consider it "default" and don't write the channel type at all.
7720 let chan_type = if self.context.channel_type != ChannelTypeFeatures::only_static_remote_key() {
7721 Some(&self.context.channel_type) } else { None };
7723 // The same logic applies for `holder_selected_channel_reserve_satoshis` values other than
7724 // the default, and when `holder_max_htlc_value_in_flight_msat` is configured to be set to
7725 // a different percentage of the channel value than 10%, which older versions of LDK used
7726 // to set it to before the percentage was made configurable.
7727 let serialized_holder_selected_reserve =
7728 if self.context.holder_selected_channel_reserve_satoshis != get_legacy_default_holder_selected_channel_reserve_satoshis(self.context.channel_value_satoshis)
7729 { Some(self.context.holder_selected_channel_reserve_satoshis) } else { None };
7731 let mut old_max_in_flight_percent_config = UserConfig::default().channel_handshake_config;
7732 old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = MAX_IN_FLIGHT_PERCENT_LEGACY;
7733 let serialized_holder_htlc_max_in_flight =
7734 if self.context.holder_max_htlc_value_in_flight_msat != get_holder_max_htlc_value_in_flight_msat(self.context.channel_value_satoshis, &old_max_in_flight_percent_config)
7735 { Some(self.context.holder_max_htlc_value_in_flight_msat) } else { None };
7737 let channel_pending_event_emitted = Some(self.context.channel_pending_event_emitted);
7738 let channel_ready_event_emitted = Some(self.context.channel_ready_event_emitted);
7740 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7741 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. Therefore,
7742 // we write the high bytes as an option here.
7743 let user_id_high_opt = Some((self.context.user_id >> 64) as u64);
7745 let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.context.holder_max_accepted_htlcs) };
7747 write_tlv_fields!(writer, {
7748 (0, self.context.announcement_sigs, option),
7749 // minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
7750 // default value instead of being Option<>al. Thus, to maintain compatibility we write
7751 // them twice, once with their original default values above, and once as an option
7752 // here. On the read side, old versions will simply ignore the odd-type entries here,
7753 // and new versions map the default values to None and allow the TLV entries here to override them.
7755 (1, self.context.minimum_depth, option),
7756 (2, chan_type, option),
7757 (3, self.context.counterparty_selected_channel_reserve_satoshis, option),
7758 (4, serialized_holder_selected_reserve, option),
7759 (5, self.context.config, required),
7760 (6, serialized_holder_htlc_max_in_flight, option),
7761 (7, self.context.shutdown_scriptpubkey, option),
7762 (8, self.context.blocked_monitor_updates, optional_vec),
7763 (9, self.context.target_closing_feerate_sats_per_kw, option),
7764 (11, self.context.monitor_pending_finalized_fulfills, required_vec),
7765 (13, self.context.channel_creation_height, required),
7766 (15, preimages, required_vec),
7767 (17, self.context.announcement_sigs_state, required),
7768 (19, self.context.latest_inbound_scid_alias, option),
7769 (21, self.context.outbound_scid_alias, required),
7770 (23, channel_ready_event_emitted, option),
7771 (25, user_id_high_opt, option),
7772 (27, self.context.channel_keys_id, required),
7773 (28, holder_max_accepted_htlcs, option),
7774 (29, self.context.temporary_channel_id, option),
7775 (31, channel_pending_event_emitted, option),
7776 (35, pending_outbound_skimmed_fees, optional_vec),
7777 (37, holding_cell_skimmed_fees, optional_vec),
7778 (38, self.context.is_batch_funding, option),
7779 (39, pending_outbound_blinding_points, optional_vec),
7780 (41, holding_cell_blinding_points, optional_vec),
7781 (43, malformed_htlcs, optional_vec), // Added in 0.0.119
7788 const MAX_ALLOC_SIZE: usize = 64*1024;
7789 impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel<SP>
7791 ES::Target: EntropySource,
7792 SP::Target: SignerProvider
7794 fn read<R : io::Read>(reader: &mut R, args: (&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)) -> Result<Self, DecodeError> {
7795 let (entropy_source, signer_provider, serialized_height, our_supported_features) = args;
7796 let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
7798 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7799 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We read
7800 // the low bytes now and the high bytes later.
7801 let user_id_low: u64 = Readable::read(reader)?;
7803 let mut config = Some(LegacyChannelConfig::default());
7805 // Read the old serialization of the ChannelConfig from version 0.0.98.
7806 config.as_mut().unwrap().options.forwarding_fee_proportional_millionths = Readable::read(reader)?;
7807 config.as_mut().unwrap().options.cltv_expiry_delta = Readable::read(reader)?;
7808 config.as_mut().unwrap().announced_channel = Readable::read(reader)?;
7809 config.as_mut().unwrap().commit_upfront_shutdown_pubkey = Readable::read(reader)?;
7811 // Read the 8 bytes of backwards-compatibility ChannelConfig data.
7812 let mut _val: u64 = Readable::read(reader)?;
7815 let channel_id = Readable::read(reader)?;
7816 let channel_state = ChannelState::from_u32(Readable::read(reader)?).map_err(|_| DecodeError::InvalidValue)?;
7817 let channel_value_satoshis = Readable::read(reader)?;
7819 let latest_monitor_update_id = Readable::read(reader)?;
7821 let mut keys_data = None;
7823 // Read the serialized signer bytes. We'll choose to deserialize them or not based on whether
7824 // the `channel_keys_id` TLV is present below.
7825 let keys_len: u32 = Readable::read(reader)?;
7826 keys_data = Some(Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE)));
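// Illustrative note (not part of the original source): the pre-allocation is capped at
// MAX_ALLOC_SIZE (64 KiB) and the loop below reads in 1 KiB chunks, so a corrupted
// `keys_len` cannot force a huge up-front allocation.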
7827 while keys_data.as_ref().unwrap().len() != keys_len as usize {
7828 // Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
7829 let mut data = [0; 1024];
7830 let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.as_ref().unwrap().len())];
7831 reader.read_exact(read_slice)?;
7832 keys_data.as_mut().unwrap().extend_from_slice(read_slice);
7836 // Read the old serialization for shutdown_pubkey, preferring the TLV field later if set.
7837 let mut shutdown_scriptpubkey = match <PublicKey as Readable>::read(reader) {
7838 Ok(pubkey) => Some(ShutdownScript::new_p2wpkh_from_pubkey(pubkey)),
7841 let destination_script = Readable::read(reader)?;
7843 let cur_holder_commitment_transaction_number = Readable::read(reader)?;
7844 let cur_counterparty_commitment_transaction_number = Readable::read(reader)?;
7845 let value_to_self_msat = Readable::read(reader)?;
7847 let pending_inbound_htlc_count: u64 = Readable::read(reader)?;
7849 let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7850 for _ in 0..pending_inbound_htlc_count {
7851 pending_inbound_htlcs.push(InboundHTLCOutput {
7852 htlc_id: Readable::read(reader)?,
7853 amount_msat: Readable::read(reader)?,
7854 cltv_expiry: Readable::read(reader)?,
7855 payment_hash: Readable::read(reader)?,
7856 state: match <u8 as Readable>::read(reader)? {
7857 1 => InboundHTLCState::AwaitingRemoteRevokeToAnnounce(Readable::read(reader)?),
7858 2 => InboundHTLCState::AwaitingAnnouncedRemoteRevoke(Readable::read(reader)?),
7859 3 => InboundHTLCState::Committed,
7860 4 => InboundHTLCState::LocalRemoved(Readable::read(reader)?),
7861 _ => return Err(DecodeError::InvalidValue),
7866 let pending_outbound_htlc_count: u64 = Readable::read(reader)?;
7867 let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7868 for _ in 0..pending_outbound_htlc_count {
7869 pending_outbound_htlcs.push(OutboundHTLCOutput {
7870 htlc_id: Readable::read(reader)?,
7871 amount_msat: Readable::read(reader)?,
7872 cltv_expiry: Readable::read(reader)?,
7873 payment_hash: Readable::read(reader)?,
7874 source: Readable::read(reader)?,
7875 state: match <u8 as Readable>::read(reader)? {
7876 0 => OutboundHTLCState::LocalAnnounced(Box::new(Readable::read(reader)?)),
7877 1 => OutboundHTLCState::Committed,
7879 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7880 OutboundHTLCState::RemoteRemoved(option.into())
7883 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7884 OutboundHTLCState::AwaitingRemoteRevokeToRemove(option.into())
7887 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7888 OutboundHTLCState::AwaitingRemovedRemoteRevoke(option.into())
7890 _ => return Err(DecodeError::InvalidValue),
7892 skimmed_fee_msat: None,
7893 blinding_point: None,
7897 let holding_cell_htlc_update_count: u64 = Readable::read(reader)?;
7898 let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, DEFAULT_MAX_HTLCS as usize*2));
7899 for _ in 0..holding_cell_htlc_update_count {
7900 holding_cell_htlc_updates.push(match <u8 as Readable>::read(reader)? {
7901 0 => HTLCUpdateAwaitingACK::AddHTLC {
7902 amount_msat: Readable::read(reader)?,
7903 cltv_expiry: Readable::read(reader)?,
7904 payment_hash: Readable::read(reader)?,
7905 source: Readable::read(reader)?,
7906 onion_routing_packet: Readable::read(reader)?,
7907 skimmed_fee_msat: None,
7908 blinding_point: None,
7910 1 => HTLCUpdateAwaitingACK::ClaimHTLC {
7911 payment_preimage: Readable::read(reader)?,
7912 htlc_id: Readable::read(reader)?,
7914 2 => HTLCUpdateAwaitingACK::FailHTLC {
7915 htlc_id: Readable::read(reader)?,
7916 err_packet: Readable::read(reader)?,
7918 _ => return Err(DecodeError::InvalidValue),
7922 let resend_order = match <u8 as Readable>::read(reader)? {
7923 0 => RAACommitmentOrder::CommitmentFirst,
7924 1 => RAACommitmentOrder::RevokeAndACKFirst,
7925 _ => return Err(DecodeError::InvalidValue),
7928 let monitor_pending_channel_ready = Readable::read(reader)?;
7929 let monitor_pending_revoke_and_ack = Readable::read(reader)?;
7930 let monitor_pending_commitment_signed = Readable::read(reader)?;
7932 let monitor_pending_forwards_count: u64 = Readable::read(reader)?;
7933 let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, DEFAULT_MAX_HTLCS as usize));
7934 for _ in 0..monitor_pending_forwards_count {
7935 monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?));
7938 let monitor_pending_failures_count: u64 = Readable::read(reader)?;
7939 let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, DEFAULT_MAX_HTLCS as usize));
7940 for _ in 0..monitor_pending_failures_count {
7941 monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?));
7944 let pending_update_fee_value: Option<u32> = Readable::read(reader)?;
7946 let holding_cell_update_fee = Readable::read(reader)?;
7948 let next_holder_htlc_id = Readable::read(reader)?;
7949 let next_counterparty_htlc_id = Readable::read(reader)?;
7950 let update_time_counter = Readable::read(reader)?;
7951 let feerate_per_kw = Readable::read(reader)?;
7953 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7954 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7955 // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7956 // consider the stale state on reload.
7957 match <u8 as Readable>::read(reader)? {
7960 let _: u32 = Readable::read(reader)?;
7961 let _: u64 = Readable::read(reader)?;
7962 let _: Signature = Readable::read(reader)?;
7964 _ => return Err(DecodeError::InvalidValue),
7967 let funding_tx_confirmed_in = Readable::read(reader)?;
7968 let funding_tx_confirmation_height = Readable::read(reader)?;
7969 let short_channel_id = Readable::read(reader)?;
7971 let counterparty_dust_limit_satoshis = Readable::read(reader)?;
7972 let holder_dust_limit_satoshis = Readable::read(reader)?;
7973 let counterparty_max_htlc_value_in_flight_msat = Readable::read(reader)?;
7974 let mut counterparty_selected_channel_reserve_satoshis = None;
7976 // Read the old serialization from version 0.0.98.
7977 counterparty_selected_channel_reserve_satoshis = Some(Readable::read(reader)?);
7979 // Read the 8 bytes of backwards-compatibility data.
7980 let _dummy: u64 = Readable::read(reader)?;
7982 let counterparty_htlc_minimum_msat = Readable::read(reader)?;
7983 let holder_htlc_minimum_msat = Readable::read(reader)?;
7984 let counterparty_max_accepted_htlcs = Readable::read(reader)?;
7986 let mut minimum_depth = None;
7988 // Read the old serialization from version 0.0.98.
7989 minimum_depth = Some(Readable::read(reader)?);
7991 // Read the 4 bytes of backwards-compatibility data.
7992 let _dummy: u32 = Readable::read(reader)?;
7995 let counterparty_forwarding_info = match <u8 as Readable>::read(reader)? {
7997 1 => Some(CounterpartyForwardingInfo {
7998 fee_base_msat: Readable::read(reader)?,
7999 fee_proportional_millionths: Readable::read(reader)?,
8000 cltv_expiry_delta: Readable::read(reader)?,
8002 _ => return Err(DecodeError::InvalidValue),
8005 let mut channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
8006 let funding_transaction: Option<Transaction> = Readable::read(reader)?;
8008 let counterparty_cur_commitment_point = Readable::read(reader)?;
8010 let counterparty_prev_commitment_point = Readable::read(reader)?;
8011 let counterparty_node_id = Readable::read(reader)?;
8013 let counterparty_shutdown_scriptpubkey = Readable::read(reader)?;
8014 let commitment_secrets = Readable::read(reader)?;
8016 let channel_update_status = Readable::read(reader)?;
8018 #[cfg(any(test, fuzzing))]
8019 let mut historical_inbound_htlc_fulfills = HashSet::new();
8020 #[cfg(any(test, fuzzing))]
8022 let htlc_fulfills_len: u64 = Readable::read(reader)?;
8023 for _ in 0..htlc_fulfills_len {
8024 assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?));
8028 let pending_update_fee = if let Some(feerate) = pending_update_fee_value {
8029 Some((feerate, if channel_parameters.is_outbound_from_holder {
8030 FeeUpdateState::Outbound
8032 FeeUpdateState::AwaitingRemoteRevokeToAnnounce
8038 let mut announcement_sigs = None;
8039 let mut target_closing_feerate_sats_per_kw = None;
8040 let mut monitor_pending_finalized_fulfills = Some(Vec::new());
8041 let mut holder_selected_channel_reserve_satoshis = Some(get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis));
8042 let mut holder_max_htlc_value_in_flight_msat = Some(get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().channel_handshake_config));
8043 // Prior to supporting channel type negotiation, all of our channels were static_remotekey
8044 // only, so we default to that if none was written.
8045 let mut channel_type = Some(ChannelTypeFeatures::only_static_remote_key());
8046 let mut channel_creation_height = Some(serialized_height);
8047 let mut preimages_opt: Option<Vec<Option<PaymentPreimage>>> = None;
8049 // If we read an old Channel, for simplicity we just treat it as "we never sent an
8050 // AnnouncementSignatures" which implies we'll re-send it on reconnect, but that's fine.
8051 let mut announcement_sigs_state = Some(AnnouncementSigsState::NotSent);
8052 let mut latest_inbound_scid_alias = None;
8053 let mut outbound_scid_alias = None;
8054 let mut channel_pending_event_emitted = None;
8055 let mut channel_ready_event_emitted = None;
8057 let mut user_id_high_opt: Option<u64> = None;
8058 let mut channel_keys_id: Option<[u8; 32]> = None;
8059 let mut temporary_channel_id: Option<ChannelId> = None;
8060 let mut holder_max_accepted_htlcs: Option<u16> = None;
8062 let mut blocked_monitor_updates = Some(Vec::new());
8064 let mut pending_outbound_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
8065 let mut holding_cell_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
8067 let mut is_batch_funding: Option<()> = None;
8069 let mut pending_outbound_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
8070 let mut holding_cell_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
8072 let mut malformed_htlcs: Option<Vec<(u64, u16, [u8; 32])>> = None;
8074 read_tlv_fields!(reader, {
8075 (0, announcement_sigs, option),
8076 (1, minimum_depth, option),
8077 (2, channel_type, option),
8078 (3, counterparty_selected_channel_reserve_satoshis, option),
8079 (4, holder_selected_channel_reserve_satoshis, option),
8080 (5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
8081 (6, holder_max_htlc_value_in_flight_msat, option),
8082 (7, shutdown_scriptpubkey, option),
8083 (8, blocked_monitor_updates, optional_vec),
8084 (9, target_closing_feerate_sats_per_kw, option),
8085 (11, monitor_pending_finalized_fulfills, optional_vec),
8086 (13, channel_creation_height, option),
8087 (15, preimages_opt, optional_vec),
8088 (17, announcement_sigs_state, option),
8089 (19, latest_inbound_scid_alias, option),
8090 (21, outbound_scid_alias, option),
8091 (23, channel_ready_event_emitted, option),
8092 (25, user_id_high_opt, option),
8093 (27, channel_keys_id, option),
8094 (28, holder_max_accepted_htlcs, option),
8095 (29, temporary_channel_id, option),
8096 (31, channel_pending_event_emitted, option),
8097 (35, pending_outbound_skimmed_fees_opt, optional_vec),
8098 (37, holding_cell_skimmed_fees_opt, optional_vec),
8099 (38, is_batch_funding, option),
8100 (39, pending_outbound_blinding_points_opt, optional_vec),
8101 (41, holding_cell_blinding_points_opt, optional_vec),
8102 (43, malformed_htlcs, optional_vec), // Added in 0.0.119
8105 let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
8106 let mut holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
8107 // If we've gotten to the funding stage of the channel, populate the signer with its
8108 // required channel parameters.
8109 if channel_state >= ChannelState::FundingNegotiated {
8110 holder_signer.provide_channel_parameters(&channel_parameters);
8112 (channel_keys_id, holder_signer)
8114 // `keys_data` can be `None` if we had corrupted data.
8115 let keys_data = keys_data.ok_or(DecodeError::InvalidValue)?;
8116 let holder_signer = signer_provider.read_chan_signer(&keys_data)?;
8117 (holder_signer.channel_keys_id(), holder_signer)
8120 if let Some(preimages) = preimages_opt {
8121 let mut iter = preimages.into_iter();
8122 for htlc in pending_outbound_htlcs.iter_mut() {
8124 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(None)) => {
8125 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
8127 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(None)) => {
8128 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
8133 // We expect all preimages to be consumed above
8134 if iter.next().is_some() {
8135 return Err(DecodeError::InvalidValue);
8139 let chan_features = channel_type.as_ref().unwrap();
8140 if !chan_features.is_subset(our_supported_features) {
8141 // If the channel was written by a new version and negotiated with features we don't
8142 // understand yet, refuse to read it.
8143 return Err(DecodeError::UnknownRequiredFeature);
8146 // ChannelTransactionParameters may have had an empty features set upon deserialization.
8147 // To account for that, we're proactively setting/overriding the field here.
8148 channel_parameters.channel_type_features = chan_features.clone();
8150 let mut secp_ctx = Secp256k1::new();
8151 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
8153 // `user_id` used to be a single u64 value. In order to remain backwards
8154 // compatible with versions prior to 0.0.113, the u128 is serialized as two
8155 // separate u64 values.
8156 let user_id = user_id_low as u128 + ((user_id_high_opt.unwrap_or(0) as u128) << 64);
8158 let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);
8160 if let Some(skimmed_fees) = pending_outbound_skimmed_fees_opt {
8161 let mut iter = skimmed_fees.into_iter();
8162 for htlc in pending_outbound_htlcs.iter_mut() {
8163 htlc.skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
8165 // We expect all skimmed fees to be consumed above
8166 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8168 if let Some(skimmed_fees) = holding_cell_skimmed_fees_opt {
8169 let mut iter = skimmed_fees.into_iter();
8170 for htlc in holding_cell_htlc_updates.iter_mut() {
8171 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut skimmed_fee_msat, .. } = htlc {
8172 *skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
8175 // We expect all skimmed fees to be consumed above
8176 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8178 if let Some(blinding_pts) = pending_outbound_blinding_points_opt {
8179 let mut iter = blinding_pts.into_iter();
8180 for htlc in pending_outbound_htlcs.iter_mut() {
8181 htlc.blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
8183 // We expect all blinding points to be consumed above
8184 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8186 if let Some(blinding_pts) = holding_cell_blinding_points_opt {
8187 let mut iter = blinding_pts.into_iter();
8188 for htlc in holding_cell_htlc_updates.iter_mut() {
8189 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut blinding_point, .. } = htlc {
8190 *blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
8193 // We expect all blinding points to be consumed above
8194 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8197 if let Some(malformed_htlcs) = malformed_htlcs {
8198 for (malformed_htlc_id, failure_code, sha256_of_onion) in malformed_htlcs {
8199 let htlc_idx = holding_cell_htlc_updates.iter().position(|htlc| {
8200 if let HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet } = htlc {
8201 let matches = *htlc_id == malformed_htlc_id;
8202 if matches { debug_assert!(err_packet.data.is_empty()) }
8205 }).ok_or(DecodeError::InvalidValue)?;
8206 let malformed_htlc = HTLCUpdateAwaitingACK::FailMalformedHTLC {
8207 htlc_id: malformed_htlc_id, failure_code, sha256_of_onion
8209 let _ = core::mem::replace(&mut holding_cell_htlc_updates[htlc_idx], malformed_htlc);
8214 context: ChannelContext {
8217 config: config.unwrap(),
8221 // Note that we don't care about serializing handshake limits as we only ever serialize
8222 // channel data after the handshake has completed.
8223 inbound_handshake_limits_override: None,
8226 temporary_channel_id,
8228 announcement_sigs_state: announcement_sigs_state.unwrap(),
8230 channel_value_satoshis,
8232 latest_monitor_update_id,
8234 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
8235 shutdown_scriptpubkey,
8238 cur_holder_commitment_transaction_number,
8239 cur_counterparty_commitment_transaction_number,
8242 holder_max_accepted_htlcs,
8243 pending_inbound_htlcs,
8244 pending_outbound_htlcs,
8245 holding_cell_htlc_updates,
8249 monitor_pending_channel_ready,
8250 monitor_pending_revoke_and_ack,
8251 monitor_pending_commitment_signed,
8252 monitor_pending_forwards,
8253 monitor_pending_failures,
8254 monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
8256 signer_pending_commitment_update: false,
8257 signer_pending_funding: false,
8260 holding_cell_update_fee,
8261 next_holder_htlc_id,
8262 next_counterparty_htlc_id,
8263 update_time_counter,
8266 #[cfg(debug_assertions)]
8267 holder_max_commitment_tx_output: Mutex::new((0, 0)),
8268 #[cfg(debug_assertions)]
8269 counterparty_max_commitment_tx_output: Mutex::new((0, 0)),
8271 last_sent_closing_fee: None,
8272 pending_counterparty_closing_signed: None,
8273 expecting_peer_commitment_signed: false,
8274 closing_fee_limits: None,
8275 target_closing_feerate_sats_per_kw,
8277 funding_tx_confirmed_in,
8278 funding_tx_confirmation_height,
8280 channel_creation_height: channel_creation_height.unwrap(),
8282 counterparty_dust_limit_satoshis,
8283 holder_dust_limit_satoshis,
8284 counterparty_max_htlc_value_in_flight_msat,
8285 holder_max_htlc_value_in_flight_msat: holder_max_htlc_value_in_flight_msat.unwrap(),
8286 counterparty_selected_channel_reserve_satoshis,
8287 holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis.unwrap(),
8288 counterparty_htlc_minimum_msat,
8289 holder_htlc_minimum_msat,
8290 counterparty_max_accepted_htlcs,
8293 counterparty_forwarding_info,
8295 channel_transaction_parameters: channel_parameters,
8296 funding_transaction,
8299 counterparty_cur_commitment_point,
8300 counterparty_prev_commitment_point,
8301 counterparty_node_id,
8303 counterparty_shutdown_scriptpubkey,
8307 channel_update_status,
8308 closing_signed_in_flight: false,
8312 #[cfg(any(test, fuzzing))]
8313 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
8314 #[cfg(any(test, fuzzing))]
8315 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
8317 workaround_lnd_bug_4006: None,
8318 sent_message_awaiting_response: None,
8320 latest_inbound_scid_alias,
8321 // Later in the ChannelManager deserialization phase we scan for channels and assign scid aliases if it's missing.
8322 outbound_scid_alias: outbound_scid_alias.unwrap_or(0),
8324 channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true),
8325 channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true),
8327 #[cfg(any(test, fuzzing))]
8328 historical_inbound_htlc_fulfills,
8330 channel_type: channel_type.unwrap(),
8333 blocked_monitor_updates: blocked_monitor_updates.unwrap(),
8342 use bitcoin::blockdata::constants::ChainHash;
8343 use bitcoin::blockdata::script::{ScriptBuf, Builder};
8344 use bitcoin::blockdata::transaction::{Transaction, TxOut};
8345 use bitcoin::blockdata::opcodes;
8346 use bitcoin::network::constants::Network;
8347 use crate::ln::onion_utils::INVALID_ONION_BLINDING;
8348 use crate::ln::{PaymentHash, PaymentPreimage};
8349 use crate::ln::channel_keys::{RevocationKey, RevocationBasepoint};
8350 use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
8351 use crate::ln::channel::InitFeatures;
8352 use crate::ln::channel::{AwaitingChannelReadyFlags, Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, HTLCUpdateAwaitingACK, commit_tx_fee_msat};
8353 use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
8354 use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
8355 use crate::ln::msgs;
8356 use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
8357 use crate::ln::script::ShutdownScript;
8358 use crate::ln::chan_utils::{self, htlc_success_tx_weight, htlc_timeout_tx_weight};
8359 use crate::chain::BestBlock;
8360 use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
8361 use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
8362 use crate::chain::transaction::OutPoint;
8363 use crate::routing::router::{Path, RouteHop};
8364 use crate::util::config::UserConfig;
8365 use crate::util::errors::APIError;
8366 use crate::util::ser::{ReadableArgs, Writeable};
8367 use crate::util::test_utils;
8368 use crate::util::test_utils::{OnGetShutdownScriptpubkey, TestKeysInterface};
8369 use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
8370 use bitcoin::secp256k1::ffi::Signature as FFISignature;
8371 use bitcoin::secp256k1::{SecretKey,PublicKey};
8372 use bitcoin::hashes::sha256::Hash as Sha256;
8373 use bitcoin::hashes::Hash;
8374 use bitcoin::hashes::hex::FromHex;
8375 use bitcoin::hash_types::WPubkeyHash;
8376 use bitcoin::blockdata::locktime::absolute::LockTime;
8377 use bitcoin::address::{WitnessProgram, WitnessVersion};
8378 use crate::prelude::*;
8380 struct TestFeeEstimator {
8383 impl FeeEstimator for TestFeeEstimator {
8384 fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 {
8390 fn test_max_funding_satoshis_no_wumbo() {
8391 assert_eq!(TOTAL_BITCOIN_SUPPLY_SATOSHIS, 21_000_000 * 100_000_000);
8392 assert!(MAX_FUNDING_SATOSHIS_NO_WUMBO <= TOTAL_BITCOIN_SUPPLY_SATOSHIS,
8393 "MAX_FUNDING_SATOSHIS_NO_WUMBO is greater than all satoshis in existence");
8397 signer: InMemorySigner,
8400 impl EntropySource for Keys {
8401 fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }
8404 impl SignerProvider for Keys {
8405 type EcdsaSigner = InMemorySigner;
8407 type TaprootSigner = InMemorySigner;
8409 fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
8410 self.signer.channel_keys_id()
8413 fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::EcdsaSigner {
8417 fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::EcdsaSigner, DecodeError> { panic!(); }
8419 fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()> {
8420 let secp_ctx = Secp256k1::signing_only();
8421 let channel_monitor_claim_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
8422 let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
8423 Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(channel_monitor_claim_key_hash).into_script())
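// Illustrative note (not part of the original source): the script built above is a
// v0 P2WPKH output, i.e. `OP_0 <20-byte pubkey hash>`, paying to the (dummy) claim key.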
8426 fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
8427 let secp_ctx = Secp256k1::signing_only();
8428 let channel_close_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
8429 Ok(ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key)))
8433 #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
8434 fn public_from_secret_hex(secp_ctx: &Secp256k1<bitcoin::secp256k1::All>, hex: &str) -> PublicKey {
8435 PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&<Vec<u8>>::from_hex(hex).unwrap()[..]).unwrap())
8439 fn upfront_shutdown_script_incompatibility() {
8440 let features = channelmanager::provided_init_features(&UserConfig::default()).clear_shutdown_anysegwit();
8441 let non_v0_segwit_shutdown_script = ShutdownScript::new_witness_program(
8442 &WitnessProgram::new(WitnessVersion::V16, &[0, 40]).unwrap(),
8445 let seed = [42; 32];
8446 let network = Network::Testnet;
8447 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8448 keys_provider.expect(OnGetShutdownScriptpubkey {
8449 returns: non_v0_segwit_shutdown_script.clone(),
8452 let secp_ctx = Secp256k1::new();
8453 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8454 let config = UserConfig::default();
8455 match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42, None) {
8456 Err(APIError::IncompatibleShutdownScript { script }) => {
8457 assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
8459 Err(e) => panic!("Unexpected error: {:?}", e),
8460 Ok(_) => panic!("Expected error"),
8464 // Check that, during channel creation, we use the same feerate in the open channel message
8465 // as we do in the Channel object creation itself.
8467 fn test_open_channel_msg_fee() {
8468 let original_fee = 253;
8469 let mut fee_est = TestFeeEstimator{fee_est: original_fee };
8470 let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
8471 let secp_ctx = Secp256k1::new();
8472 let seed = [42; 32];
8473 let network = Network::Testnet;
8474 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8476 let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8477 let config = UserConfig::default();
8478 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8480 // Now change the fee so we can check that the fee in the open_channel message is the
8481 // same as the old fee.
8482 fee_est.fee_est = 500;
8483 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8484 assert_eq!(open_channel_msg.feerate_per_kw, original_fee);
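// Illustrative note: the feerate is read from the estimator once, when the channel object is
// constructed, so changing the estimator afterwards does not retroactively change the value
// carried in `open_channel` -- which is exactly what the assertion above demonstrates.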
8488 fn test_holder_vs_counterparty_dust_limit() {
8489 // Test that when calculating the local and remote commitment transaction fees, the correct
8490 // dust limits are used.
8491 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8492 let secp_ctx = Secp256k1::new();
8493 let seed = [42; 32];
8494 let network = Network::Testnet;
8495 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8496 let logger = test_utils::TestLogger::new();
8497 let best_block = BestBlock::from_network(network);
8499 // Go through the flow of opening a channel between two nodes, making sure
8500 // they have different dust limits.
8502 // Create Node A's channel pointing to Node B's pubkey
8503 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8504 let config = UserConfig::default();
8505 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8507 // Create Node B's channel by receiving Node A's open_channel message
8508 // Make sure A's dust limit is as we expect.
8509 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8510 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8511 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8513 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
8514 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
8515 accept_channel_msg.dust_limit_satoshis = 546;
8516 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8517 node_a_chan.context.holder_dust_limit_satoshis = 1560;
8519 // Node A --> Node B: funding created
8520 let output_script = node_a_chan.context.get_funding_redeemscript();
8521 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8522 value: 10000000, script_pubkey: output_script.clone(),
8524 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8525 let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8526 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8528 // Node B --> Node A: funding signed
8529 let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
8530 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
8532 // Put some inbound and outbound HTLCs in A's channel.
8533 let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
8534 node_a_chan.context.pending_inbound_htlcs.push(InboundHTLCOutput {
8536 amount_msat: htlc_amount_msat,
8537 payment_hash: PaymentHash(Sha256::hash(&[42; 32]).to_byte_array()),
8538 cltv_expiry: 300000000,
8539 state: InboundHTLCState::Committed,
8542 node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
8544 amount_msat: htlc_amount_msat, // put an amount below A's dust amount but above B's.
8545 payment_hash: PaymentHash(Sha256::hash(&[43; 32]).to_byte_array()),
8546 cltv_expiry: 200000000,
8547 state: OutboundHTLCState::Committed,
8548 source: HTLCSource::OutboundRoute {
8549 path: Path { hops: Vec::new(), blinded_tail: None },
8550 session_priv: SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
8551 first_hop_htlc_msat: 548,
8552 payment_id: PaymentId([42; 32]),
8554 skimmed_fee_msat: None,
8555 blinding_point: None,
8558 // Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass
8559 // the dust limit check.
8560 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
8561 let local_commit_tx_fee = node_a_chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8562 let local_commit_fee_0_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.get_channel_type());
8563 assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs);
8565 // Finally, make sure that when Node A calculates the remote's commitment transaction fees, all
8566 // of the HTLCs are seen to be above the dust limit.
8567 node_a_chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
8568 let remote_commit_fee_3_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.get_channel_type());
8569 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
8570 let remote_commit_tx_fee = node_a_chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8571 assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs);
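// Worked numbers for the amount above (a sketch assuming the pre-anchors HTLC transaction
// weights of 663 WU for timeout txs and 703 WU for success txs, at feerate_per_kw = 15_000):
//   A's offered-HTLC dust threshold:  1560 + 15_000 * 663 / 1000 = 11_505 sat
//   A's received-HTLC dust threshold: 1560 + 15_000 * 703 / 1000 = 12_105 sat
//   thresholds using B's dust limit:   546 + 15_000 * 663 / 1000 = 10_491 sat (offered)
//                                      546 + 15_000 * 703 / 1000 = 11_091 sat (received)
// so the 11_092 sat HTLC is dust on A's commitment but non-dust on B's, matching the two
// commitment-fee assertions above.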
8575 fn test_timeout_vs_success_htlc_dust_limit() {
8576 // Make sure that when `next_remote_commit_tx_fee_msat` and `next_local_commit_tx_fee_msat`
8577 // calculate the real dust limits for HTLCs (i.e. the dust limit given by the counterparty
8578 // *plus* the fees paid for the HTLC) they don't swap `HTLC_SUCCESS_TX_WEIGHT` for
8579 // `HTLC_TIMEOUT_TX_WEIGHT`, and vice versa.
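// The effective per-HTLC dust threshold exercised below is, roughly:
//   offered HTLC:  dust_limit_satoshis + feerate_per_kw * htlc_timeout_tx_weight / 1000
//   received HTLC: dust_limit_satoshis + feerate_per_kw * htlc_success_tx_weight / 1000
// The candidate amounts are chosen one satoshi above/below these boundaries, so swapping the
// two weights would flip dust vs. non-dust and change the expected commitment fee.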
8580 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 253 });
8581 let secp_ctx = Secp256k1::new();
8582 let seed = [42; 32];
8583 let network = Network::Testnet;
8584 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8586 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8587 let config = UserConfig::default();
8588 let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8590 let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.get_channel_type());
8591 let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.get_channel_type());
8593 // If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped: then this HTLC would be
8594 // counted as dust when it shouldn't be.
8595 let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000;
8596 let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
8597 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8598 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
8600 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
8601 let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000;
8602 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered);
8603 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8604 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
8606 chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
8608 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
8609 let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000;
8610 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
8611 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8612 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
8614 // If swapped: this HTLC would be counted as dust when it shouldn't be.
8615 let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000;
8616 let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered);
8617 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8618 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
8622 fn channel_reestablish_no_updates() {
8623 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8624 let logger = test_utils::TestLogger::new();
8625 let secp_ctx = Secp256k1::new();
8626 let seed = [42; 32];
8627 let network = Network::Testnet;
8628 let best_block = BestBlock::from_network(network);
8629 let chain_hash = ChainHash::using_genesis_block(network);
8630 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8632 // Go through the flow of opening a channel between two nodes.
8634 // Create Node A's channel pointing to Node B's pubkey
8635 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8636 let config = UserConfig::default();
8637 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8639 // Create Node B's channel by receiving Node A's open_channel message
8640 let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
8641 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8642 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8644 // Node B --> Node A: accept channel
8645 let accept_channel_msg = node_b_chan.accept_inbound_channel();
8646 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8648 // Node A --> Node B: funding created
8649 let output_script = node_a_chan.context.get_funding_redeemscript();
8650 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8651 value: 10000000, script_pubkey: output_script.clone(),
8653 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8654 let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8655 let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8657 // Node B --> Node A: funding signed
8658 let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
8659 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
8661 // Now disconnect the two nodes and check that the commitment point in
8662 // Node B's channel_reestablish message is sane.
8663 assert!(node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
8664 let msg = node_b_chan.get_channel_reestablish(&&logger);
8665 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
8666 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
8667 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
8669 // Check that the commitment point in Node A's channel_reestablish message is sane.
8671 assert!(node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
8672 let msg = node_a_chan.get_channel_reestablish(&&logger);
8673 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
8674 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
8675 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
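// For context (informal): at this point only the initial commitment transaction from the
// funding flow exists and nothing has been revoked yet, so both sides report
// next_local_commitment_number == 1 (a.k.a. next_commitment_number),
// next_remote_commitment_number == 0 (a.k.a. next_revocation_number), and an all-zero
// your_last_per_commitment_secret, exactly as asserted above.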
8679 fn test_configured_holder_max_htlc_value_in_flight() {
8680 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8681 let logger = test_utils::TestLogger::new();
8682 let secp_ctx = Secp256k1::new();
8683 let seed = [42; 32];
8684 let network = Network::Testnet;
8685 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8686 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8687 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8689 let mut config_2_percent = UserConfig::default();
8690 config_2_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 2;
8691 let mut config_99_percent = UserConfig::default();
8692 config_99_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 99;
8693 let mut config_0_percent = UserConfig::default();
8694 config_0_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 0;
8695 let mut config_101_percent = UserConfig::default();
8696 config_101_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 101;
8698 // Test that `OutboundV1Channel::new` creates a channel with the correct value for
8699 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
8700 // which is set to the lower bound + 1 (2%) of the `channel_value`.
8701 let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42, None).unwrap();
8702 let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000;
8703 assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64);
8705 // Test with the upper bound - 1 of valid values (99%).
8706 let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42, None).unwrap();
8707 let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000;
8708 assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);
8710 let chan_1_open_channel_msg = chan_1.get_open_channel(ChainHash::using_genesis_block(network));
8712 // Test that `InboundV1Channel::new` creates a channel with the correct value for
8713 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
8714 // which is set to the lower bound + 1 (2%) of the `channel_value`.
8715 let chan_3 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8716 let chan_3_value_msat = chan_3.context.channel_value_satoshis * 1000;
8717 assert_eq!(chan_3.context.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64);
8719 // Test with the upper bound - 1 of valid values (99%).
8720 let chan_4 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8721 let chan_4_value_msat = chan_4.context.channel_value_satoshis * 1000;
8722 assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64);
8724 // Test that `OutboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
8725 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
8726 let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42, None).unwrap();
8727 let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000;
8728 assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64);
8730 // Test that `OutboundV1Channel::new` uses the upper bound of the configurable percentage values
8731 // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value larger than 100.
8733 let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42, None).unwrap();
8734 let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000;
8735 assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);
8737 // Test that `InboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
8738 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
8739 let chan_7 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8740 let chan_7_value_msat = chan_7.context.channel_value_satoshis * 1000;
8741 assert_eq!(chan_7.context.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64);
8743 // Test that `InboundV1Channel::new` uses the upper bound of the configurable percentage values
8744 // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value larger than 100.
8746 let chan_8 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8747 let chan_8_value_msat = chan_8.context.channel_value_satoshis * 1000;
8748 assert_eq!(chan_8.context.holder_max_htlc_value_in_flight_msat, chan_8_value_msat);
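// In effect (an informal summary of the assertions above), the configured percentage is
// clamped to the [1, 100] range before being applied to the channel value, i.e. roughly:
//   holder_max_htlc_value_in_flight_msat = channel_value_msat * clamp(percent, 1, 100) / 100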
8752 fn test_configured_holder_selected_channel_reserve_satoshis() {
8754 // Test that `OutboundV1Channel::new` and `InboundV1Channel::new` create a channel with the correct
8755 // channel reserves, when `their_channel_reserve_proportional_millionths` is configured.
8756 test_self_and_counterparty_channel_reserve(10_000_000, 0.02, 0.02);
8758 // Test with valid but unreasonably high channel reserves
8759 // Requesting and accepting parties ask for 49%-49% and 60%-30% channel reserves, respectively.
8760 test_self_and_counterparty_channel_reserve(10_000_000, 0.49, 0.49);
8761 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.30);
8763 // Test with a calculated channel reserve below the lower bound,
8764 // i.e. `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
8765 test_self_and_counterparty_channel_reserve(100_000, 0.00002, 0.30);
8767 // Test with invalid channel reserves, since the sum of both is greater than or equal to the channel value.
8769 test_self_and_counterparty_channel_reserve(10_000_000, 0.50, 0.50);
8770 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.50);
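// Rough model of what the helper below checks (illustrative only): each side's selected
// reserve is
//   max(MIN_THEIR_CHAN_RESERVE_SATOSHIS,
//       channel_value_satoshis * their_channel_reserve_proportional_millionths / 1_000_000)
// e.g. 10_000_000 sat at 2% => 200_000 sat. If the two selected reserves sum to the full
// channel value or more, the inbound side rejects the open and `InboundV1Channel::new` errors.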
8773 fn test_self_and_counterparty_channel_reserve(channel_value_satoshis: u64, outbound_selected_channel_reserve_perc: f64, inbound_selected_channel_reserve_perc: f64) {
8774 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15_000 });
8775 let logger = test_utils::TestLogger::new();
8776 let secp_ctx = Secp256k1::new();
8777 let seed = [42; 32];
8778 let network = Network::Testnet;
8779 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8780 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8781 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8784 let mut outbound_node_config = UserConfig::default();
8785 outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
8786 let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42, None).unwrap();
8788 let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
8789 assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);
8791 let chan_open_channel_msg = chan.get_open_channel(ChainHash::using_genesis_block(network));
8792 let mut inbound_node_config = UserConfig::default();
8793 inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
8795 if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 {
8796 let chan_inbound_node = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false).unwrap();
8798 let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64);
8800 assert_eq!(chan_inbound_node.context.holder_selected_channel_reserve_satoshis, expected_inbound_selected_chan_reserve);
8801 assert_eq!(chan_inbound_node.context.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve);
8803 // Channel negotiation should fail
8804 let result = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false);
8805 assert!(result.is_err());
8810 fn channel_update() {
8811 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8812 let logger = test_utils::TestLogger::new();
8813 let secp_ctx = Secp256k1::new();
8814 let seed = [42; 32];
8815 let network = Network::Testnet;
8816 let best_block = BestBlock::from_network(network);
8817 let chain_hash = ChainHash::using_genesis_block(network);
8818 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8820 // Create Node A's channel pointing to Node B's pubkey
8821 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8822 let config = UserConfig::default();
8823 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8825 // Create Node B's channel by receiving Node A's open_channel message
8826 // Make sure A's dust limit is as we expect.
8827 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8828 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8829 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8831 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
8832 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
8833 accept_channel_msg.dust_limit_satoshis = 546;
8834 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8835 node_a_chan.context.holder_dust_limit_satoshis = 1560;
8837 // Node A --> Node B: funding created
8838 let output_script = node_a_chan.context.get_funding_redeemscript();
8839 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8840 value: 10000000, script_pubkey: output_script.clone(),
8842 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8843 let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8844 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8846 // Node B --> Node A: funding signed
8847 let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
8848 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
8850 // Make sure that receiving a channel update will update the Channel as expected.
8851 let update = ChannelUpdate {
8852 contents: UnsignedChannelUpdate {
8854 short_channel_id: 0,
8857 cltv_expiry_delta: 100,
8858 htlc_minimum_msat: 5,
8859 htlc_maximum_msat: MAX_VALUE_MSAT,
8860 fee_base_msat: 110,
8861 fee_proportional_millionths: 11,
8862 excess_data: Vec::new(),
8864 signature: Signature::from(unsafe { FFISignature::new() })
8866 assert!(node_a_chan.channel_update(&update).unwrap());
8868 // The counterparty can send an update with a higher minimum HTLC, but that shouldn't
8869 // change our official htlc_minimum_msat.
8870 assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1);
8871 match node_a_chan.context.counterparty_forwarding_info() {
8872 Some(info) => {
8873 assert_eq!(info.cltv_expiry_delta, 100);
8874 assert_eq!(info.fee_base_msat, 110);
8875 assert_eq!(info.fee_proportional_millionths, 11);
8877 None => panic!("expected counterparty forwarding info to be Some")
8880 assert!(!node_a_chan.channel_update(&update).unwrap());
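// Informally: applying a counterparty `channel_update` refreshes the cached
// `CounterpartyForwardingInfo` (cltv_expiry_delta / fee_base_msat / fee_proportional_millionths)
// used when forwarding through them, but never alters our own advertised `htlc_minimum_msat`;
// re-applying the identical update changes nothing and `channel_update` returns false, as
// asserted above.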
8884 fn blinding_point_skimmed_fee_malformed_ser() {
8885 // Ensure that channel blinding points, skimmed fees, and malformed HTLCs are (de)serialized properly.
8887 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8888 let secp_ctx = Secp256k1::new();
8889 let seed = [42; 32];
8890 let network = Network::Testnet;
8891 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8893 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8894 let config = UserConfig::default();
8895 let features = channelmanager::provided_init_features(&config);
8896 let outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8897 let mut chan = Channel { context: outbound_chan.context };
8899 let dummy_htlc_source = HTLCSource::OutboundRoute {
8901 hops: vec![RouteHop {
8902 pubkey: test_utils::pubkey(2), channel_features: ChannelFeatures::empty(),
8903 node_features: NodeFeatures::empty(), short_channel_id: 0, fee_msat: 0,
8904 cltv_expiry_delta: 0, maybe_announced_channel: false,
8908 session_priv: test_utils::privkey(42),
8909 first_hop_htlc_msat: 0,
8910 payment_id: PaymentId([42; 32]),
8912 let dummy_outbound_output = OutboundHTLCOutput {
8915 payment_hash: PaymentHash([43; 32]),
8917 state: OutboundHTLCState::Committed,
8918 source: dummy_htlc_source.clone(),
8919 skimmed_fee_msat: None,
8920 blinding_point: None,
8922 let mut pending_outbound_htlcs = vec![dummy_outbound_output.clone(); 10];
8923 for (idx, htlc) in pending_outbound_htlcs.iter_mut().enumerate() {
8925 htlc.blinding_point = Some(test_utils::pubkey(42 + idx as u8));
8928 htlc.skimmed_fee_msat = Some(1);
8931 chan.context.pending_outbound_htlcs = pending_outbound_htlcs.clone();
8933 let dummy_holding_cell_add_htlc = HTLCUpdateAwaitingACK::AddHTLC {
8936 payment_hash: PaymentHash([43; 32]),
8937 source: dummy_htlc_source.clone(),
8938 onion_routing_packet: msgs::OnionPacket {
8940 public_key: Ok(test_utils::pubkey(1)),
8941 hop_data: [0; 20*65],
8944 skimmed_fee_msat: None,
8945 blinding_point: None,
8947 let dummy_holding_cell_claim_htlc = HTLCUpdateAwaitingACK::ClaimHTLC {
8948 payment_preimage: PaymentPreimage([42; 32]),
8951 let dummy_holding_cell_failed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailHTLC {
8952 htlc_id, err_packet: msgs::OnionErrorPacket { data: vec![42] }
8954 let dummy_holding_cell_malformed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailMalformedHTLC {
8955 htlc_id, failure_code: INVALID_ONION_BLINDING, sha256_of_onion: [0; 32],
8957 let mut holding_cell_htlc_updates = Vec::with_capacity(12);
8960 holding_cell_htlc_updates.push(dummy_holding_cell_add_htlc.clone());
8961 } else if i % 5 == 1 {
8962 holding_cell_htlc_updates.push(dummy_holding_cell_claim_htlc.clone());
8963 } else if i % 5 == 2 {
8964 let mut dummy_add = dummy_holding_cell_add_htlc.clone();
8965 if let HTLCUpdateAwaitingACK::AddHTLC {
8966 ref mut blinding_point, ref mut skimmed_fee_msat, ..
8967 } = &mut dummy_add {
8968 *blinding_point = Some(test_utils::pubkey(42 + i));
8969 *skimmed_fee_msat = Some(42);
8971 holding_cell_htlc_updates.push(dummy_add);
8972 } else if i % 5 == 3 {
8973 holding_cell_htlc_updates.push(dummy_holding_cell_malformed_htlc(i as u64));
8975 holding_cell_htlc_updates.push(dummy_holding_cell_failed_htlc(i as u64));
8978 chan.context.holding_cell_htlc_updates = holding_cell_htlc_updates.clone();
8980 // Encode and decode the channel and ensure that the HTLCs within are the same.
8981 let encoded_chan = chan.encode();
8982 let mut s = crate::io::Cursor::new(&encoded_chan);
8983 let mut reader = crate::util::ser::FixedLengthReader::new(&mut s, encoded_chan.len() as u64);
8984 let features = channelmanager::provided_channel_type_features(&config);
8985 let decoded_chan = Channel::read(&mut reader, (&&keys_provider, &&keys_provider, 0, &features)).unwrap();
8986 assert_eq!(decoded_chan.context.pending_outbound_htlcs, pending_outbound_htlcs);
8987 assert_eq!(decoded_chan.context.holding_cell_htlc_updates, holding_cell_htlc_updates);
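// Informal note: `FixedLengthReader` bounds the decode to exactly the bytes produced by
// `chan.encode()`, and the two equality asserts confirm that the newer per-HTLC fields
// (blinding points, skimmed fees, and the malformed-HTLC holding-cell variant) survive the
// encode/decode round trip unchanged.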
8990 #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
8992 fn outbound_commitment_test() {
8993 use bitcoin::sighash;
8994 use bitcoin::consensus::encode::serialize;
8995 use bitcoin::sighash::EcdsaSighashType;
8996 use bitcoin::hashes::hex::FromHex;
8997 use bitcoin::hash_types::Txid;
8998 use bitcoin::secp256k1::Message;
8999 use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, ecdsa::EcdsaChannelSigner};
9000 use crate::ln::PaymentPreimage;
9001 use crate::ln::channel::{HTLCOutputInCommitment ,TxCreationKeys};
9002 use crate::ln::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint};
9003 use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
9004 use crate::util::logger::Logger;
9005 use crate::sync::Arc;
9006 use core::str::FromStr;
9007 use hex::DisplayHex;
9009 // Test vectors from BOLT 3 Appendices C and F (anchors):
9010 let feeest = TestFeeEstimator{fee_est: 15000};
9011 let logger : Arc<dyn Logger> = Arc::new(test_utils::TestLogger::new());
9012 let secp_ctx = Secp256k1::new();
9014 let mut signer = InMemorySigner::new(
9016 SecretKey::from_slice(&<Vec<u8>>::from_hex("30ff4956bbdd3222d44cc5e8a1261dab1e07957bdac5ae88fe3261ef321f3749").unwrap()[..]).unwrap(),
9017 SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
9018 SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
9019 SecretKey::from_slice(&<Vec<u8>>::from_hex("3333333333333333333333333333333333333333333333333333333333333333").unwrap()[..]).unwrap(),
9020 SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
9022 // These aren't set in the test vectors:
9023 [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff],
9029 assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..],
9030 <Vec<u8>>::from_hex("023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb").unwrap()[..]);
9031 let keys_provider = Keys { signer: signer.clone() };
9033 let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9034 let mut config = UserConfig::default();
9035 config.channel_handshake_config.announced_channel = false;
9036 let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42, None).unwrap(); // Nothing uses their network key in this test
9037 chan.context.holder_dust_limit_satoshis = 546;
9038 chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in in accept_channel
9040 let funding_info = OutPoint{ txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
9042 let counterparty_pubkeys = ChannelPublicKeys {
9043 funding_pubkey: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
9044 revocation_basepoint: RevocationBasepoint::from(PublicKey::from_slice(&<Vec<u8>>::from_hex("02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27").unwrap()[..]).unwrap()),
9045 payment_point: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"),
9046 delayed_payment_basepoint: DelayedPaymentBasepoint::from(public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13")),
9047 htlc_basepoint: HtlcBasepoint::from(public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"))
9049 chan.context.channel_transaction_parameters.counterparty_parameters = Some(
9050 CounterpartyChannelTransactionParameters {
9051 pubkeys: counterparty_pubkeys.clone(),
9052 selected_contest_delay: 144
9054 chan.context.channel_transaction_parameters.funding_outpoint = Some(funding_info);
9055 signer.provide_channel_parameters(&chan.context.channel_transaction_parameters);
9057 assert_eq!(counterparty_pubkeys.payment_point.serialize()[..],
9058 <Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
9060 assert_eq!(counterparty_pubkeys.funding_pubkey.serialize()[..],
9061 <Vec<u8>>::from_hex("030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c1").unwrap()[..]);
9063 assert_eq!(counterparty_pubkeys.htlc_basepoint.to_public_key().serialize()[..],
9064 <Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
9066 // We can't just use build_holder_transaction_keys here as the per_commitment_secret is not
9067 // derived from a commitment_seed, so instead we copy it here and call
9068 // build_commitment_transaction.
9069 let delayed_payment_base = &chan.context.holder_signer.as_ref().pubkeys().delayed_payment_basepoint;
9070 let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
9071 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
9072 let htlc_basepoint = &chan.context.holder_signer.as_ref().pubkeys().htlc_basepoint;
9073 let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint);
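// Informal recap: `TxCreationKeys::derive_new` combines the fixed per-channel basepoints with
// this specific per-commitment point, yielding the broadcaster's delayed-payment and HTLC keys,
// the countersignatory's HTLC key, and the revocation key for exactly one commitment state; the
// BOLT 3 vectors below are all built against that single state.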
9075 macro_rules! test_commitment {
9076 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
9077 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::only_static_remote_key();
9078 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::only_static_remote_key(), $($remain)*);
9082 macro_rules! test_commitment_with_anchors {
9083 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
9084 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9085 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), $($remain)*);
9089 macro_rules! test_commitment_common {
9090 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $opt_anchors: expr, {
9091 $( { $htlc_idx: expr, $counterparty_htlc_sig_hex: expr, $htlc_sig_hex: expr, $htlc_tx_hex: expr } ), *
9093 let (commitment_tx, htlcs): (_, Vec<HTLCOutputInCommitment>) = {
9094 let mut commitment_stats = chan.context.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, &logger);
9096 let htlcs = commitment_stats.htlcs_included.drain(..)
9097 .filter_map(|(htlc, _)| if htlc.transaction_output_index.is_some() { Some(htlc) } else { None })
9099 (commitment_stats.tx, htlcs)
9101 let trusted_tx = commitment_tx.trust();
9102 let unsigned_tx = trusted_tx.built_transaction();
9103 let redeemscript = chan.context.get_funding_redeemscript();
9104 let counterparty_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_sig_hex).unwrap()[..]).unwrap();
9105 let sighash = unsigned_tx.get_sighash_all(&redeemscript, chan.context.channel_value_satoshis);
9106 log_trace!(logger, "unsigned_tx = {}", serialize(&unsigned_tx.transaction).as_hex());
9107 assert!(secp_ctx.verify_ecdsa(&sighash, &counterparty_signature, chan.context.counterparty_funding_pubkey()).is_ok(), "verify counterparty commitment sig");
9109 let mut per_htlc: Vec<(HTLCOutputInCommitment, Option<Signature>)> = Vec::new();
9110 per_htlc.clear(); // Don't warn about excess mut for no-HTLC calls
9111 let mut counterparty_htlc_sigs = Vec::new();
9112 counterparty_htlc_sigs.clear(); // Don't warn about excess mut for no-HTLC calls
9114 let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
9115 per_htlc.push((htlcs[$htlc_idx].clone(), Some(remote_signature)));
9116 counterparty_htlc_sigs.push(remote_signature);
9118 assert_eq!(htlcs.len(), per_htlc.len());
9120 let holder_commitment_tx = HolderCommitmentTransaction::new(
9121 commitment_tx.clone(),
9122 counterparty_signature,
9123 counterparty_htlc_sigs,
9124 &chan.context.holder_signer.as_ref().pubkeys().funding_pubkey,
9125 chan.context.counterparty_funding_pubkey()
9127 let holder_sig = signer.sign_holder_commitment(&holder_commitment_tx, &secp_ctx).unwrap();
9128 assert_eq!(Signature::from_der(&<Vec<u8>>::from_hex($sig_hex).unwrap()[..]).unwrap(), holder_sig, "holder_sig");
9130 let funding_redeemscript = chan.context.get_funding_redeemscript();
9131 let tx = holder_commitment_tx.add_holder_sig(&funding_redeemscript, holder_sig);
9132 assert_eq!(serialize(&tx)[..], <Vec<u8>>::from_hex($tx_hex).unwrap()[..], "tx");
9134 // ((htlc, counterparty_sig), (index, holder_sig))
9135 let mut htlc_counterparty_sig_iter = holder_commitment_tx.counterparty_htlc_sigs.iter();
9138 log_trace!(logger, "verifying htlc {}", $htlc_idx);
9139 let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
9141 let ref htlc = htlcs[$htlc_idx];
9142 let mut htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw,
9143 chan.context.get_counterparty_selected_contest_delay().unwrap(),
9144 &htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
9145 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
9146 let htlc_sighashtype = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
9147 let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
9148 assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key.to_public_key()).is_ok(), "verify counterparty htlc sig");
9150 let mut preimage: Option<PaymentPreimage> = None;
9153 let out = PaymentHash(Sha256::hash(&[i; 32]).to_byte_array());
9154 if out == htlc.payment_hash {
9155 preimage = Some(PaymentPreimage([i; 32]));
9159 assert!(preimage.is_some());
9162 let htlc_counterparty_sig = htlc_counterparty_sig_iter.next().unwrap();
9163 let htlc_holder_sig = signer.sign_holder_htlc_transaction(&htlc_tx, 0, &HTLCDescriptor {
9164 channel_derivation_parameters: ChannelDerivationParameters {
9165 value_satoshis: chan.context.channel_value_satoshis,
9166 keys_id: chan.context.channel_keys_id,
9167 transaction_parameters: chan.context.channel_transaction_parameters.clone(),
9169 commitment_txid: trusted_tx.txid(),
9170 per_commitment_number: trusted_tx.commitment_number(),
9171 per_commitment_point: trusted_tx.per_commitment_point(),
9172 feerate_per_kw: trusted_tx.feerate_per_kw(),
9174 preimage: preimage.clone(),
9175 counterparty_sig: *htlc_counterparty_sig,
9176 }, &secp_ctx).unwrap();
9177 let num_anchors = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { 2 } else { 0 };
9178 assert_eq!(htlc.transaction_output_index, Some($htlc_idx + num_anchors), "output index");
9180 let signature = Signature::from_der(&<Vec<u8>>::from_hex($htlc_sig_hex).unwrap()[..]).unwrap();
9181 assert_eq!(signature, htlc_holder_sig, "htlc sig");
9182 let trusted_tx = holder_commitment_tx.trust();
9183 htlc_tx.input[0].witness = trusted_tx.build_htlc_input_witness($htlc_idx, htlc_counterparty_sig, &htlc_holder_sig, &preimage);
9184 log_trace!(logger, "htlc_tx = {}", serialize(&htlc_tx).as_hex());
9185 assert_eq!(serialize(&htlc_tx)[..], <Vec<u8>>::from_hex($htlc_tx_hex).unwrap()[..], "htlc tx");
9187 assert!(htlc_counterparty_sig_iter.next().is_none());
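// Taken together, each invocation checks (informally): the counterparty's commitment signature
// verifies against the funding redeemscript, our holder signature matches the BOLT 3 vector, the
// fully signed commitment serializes to the expected hex, and every non-dust HTLC gets a
// counterparty signature that verifies plus a holder HTLC signature and transaction matching the
// vectors.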
9191 // anchors: simple commitment tx with no HTLCs and single anchor
9192 test_commitment_with_anchors!("30440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a8658",
9193 "3044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778",
9194 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f10529800000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778014730440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a865801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9196 // simple commitment tx with no HTLCs
9197 chan.context.value_to_self_msat = 7000000000;
9199 test_commitment!("3045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b0",
9200 "30440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae055647142",
9201 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48454a56a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae05564714201483045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9203 // anchors: simple commitment tx with no HTLCs
9204 test_commitment_with_anchors!("3045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f3",
9205 "30450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f7",
9206 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a508b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f701483045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9208 chan.context.pending_inbound_htlcs.push({
9209 let mut out = InboundHTLCOutput{
9211 amount_msat: 1000000,
9213 payment_hash: PaymentHash([0; 32]),
9214 state: InboundHTLCState::Committed,
9216 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap()).to_byte_array();
9219 chan.context.pending_inbound_htlcs.push({
9220 let mut out = InboundHTLCOutput{
9222 amount_msat: 2000000,
9224 payment_hash: PaymentHash([0; 32]),
9225 state: InboundHTLCState::Committed,
9227 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
9230 chan.context.pending_outbound_htlcs.push({
9231 let mut out = OutboundHTLCOutput{
9233 amount_msat: 2000000,
9235 payment_hash: PaymentHash([0; 32]),
9236 state: OutboundHTLCState::Committed,
9237 source: HTLCSource::dummy(),
9238 skimmed_fee_msat: None,
9239 blinding_point: None,
9241 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).to_byte_array();
9244 chan.context.pending_outbound_htlcs.push({
9245 let mut out = OutboundHTLCOutput{
9247 amount_msat: 3000000,
9249 payment_hash: PaymentHash([0; 32]),
9250 state: OutboundHTLCState::Committed,
9251 source: HTLCSource::dummy(),
9252 skimmed_fee_msat: None,
9253 blinding_point: None,
9255 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).to_byte_array();
9258 chan.context.pending_inbound_htlcs.push({
9259 let mut out = InboundHTLCOutput{
9261 amount_msat: 4000000,
9263 payment_hash: PaymentHash([0; 32]),
9264 state: InboundHTLCState::Committed,
9266 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0404040404040404040404040404040404040404040404040404040404040404").unwrap()).to_byte_array();
9270 // commitment tx with all five HTLCs untrimmed (minimum feerate)
9271 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9272 chan.context.feerate_per_kw = 0;
9274 test_commitment!("3044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e5",
9275 "304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea",
9276 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e0a06a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea01473044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9279 "3045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b",
9280 "30440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce",
9281 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b00000000000000000001e8030000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b014730440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
9284 "30440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f896004",
9285 "3045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f",
9286 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b01000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f89600401483045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9289 "30440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d43352",
9290 "3045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa",
9291 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b02000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d4335201483045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9294 "304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c1363",
9295 "304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac7487",
9296 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b03000000000000000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c13630147304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac748701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9299 "3044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b87",
9300 "3045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95",
9301 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b04000000000000000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b8701483045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9304 // commitment tx with seven outputs untrimmed (maximum feerate)
9305 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9306 chan.context.feerate_per_kw = 647;
9308 test_commitment!("3045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee",
9309 "30450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb7",
9310 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e09c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb701483045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9313 "30450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f",
9314 "30440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b",
9315 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f014730440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
9318 "304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c",
9319 "30450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f",
9320 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0100000000000000000124060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c014830450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9323 "30440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c8",
9324 "3045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673",
9325 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe020000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c801483045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9328 "304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c64",
9329 "3045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee",
9330 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe030000000000000000010c0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c6401483045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9333 "30450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca",
9334 "3044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9",
9335 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe04000000000000000001da0d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca01473044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
// commitment tx with six outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 648;
9342 test_commitment!("304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e5",
9343 "3045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e9",
9344 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4844e9d6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e90147304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9347 "3045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e0",
9348 "304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec6",
9349 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10000000000000000000123060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e00148304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9352 "304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d384124",
9353 "3044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae",
9354 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10100000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d38412401473044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9357 "304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc625532989",
9358 "3045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e2260796",
9359 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf1020000000000000000010b0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc62553298901483045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e226079601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9362 "3044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be",
9363 "3045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6",
9364 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf103000000000000000001d90d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be01483045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
// anchors: commitment tx with six outputs untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 645;
chan.context.holder_dust_limit_satoshis = 1001;
9372 test_commitment_with_anchors!("3044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc312",
9373 "3045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d0051",
9374 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80084a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837ead007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5eb80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994abc996a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d005101473044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc31201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9377 "3045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc282",
9378 "3045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef09",
9379 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320002000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc28283483045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef0901008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000" },
9382 "3045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e",
9383 "3045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac",
9384 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320003000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e83483045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
9387 "3044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c1",
9388 "304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e",
9389 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320004000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c18347304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
9392 "3044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc615",
9393 "3045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226",
9394 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320005000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc61583483045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
// commitment tx with six outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2069;
chan.context.holder_dust_limit_satoshis = 546;
9402 test_commitment!("304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc",
9403 "3045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e33",
9404 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48477956a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e330148304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9407 "3045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f699",
9408 "3045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d",
9409 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0000000000000000000175020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f69901483045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9412 "3045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df",
9413 "3045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61",
9414 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0100000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df01483045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9417 "3045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e0",
9418 "3044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c18",
9419 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d020000000000000000015d060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e001473044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c1801008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9422 "304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df",
9423 "3045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33",
9424 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d03000000000000000001f2090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df01483045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
// commitment tx with five outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2070;
9431 test_commitment!("304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c",
9432 "3044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c1",
9433 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484da966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c10147304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9436 "304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b",
9437 "30440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e5",
9438 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff0000000000000000000174020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b014730440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e501008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9441 "3045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546",
9442 "30450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c6",
9443 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff010000000000000000015c060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546014830450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9446 "3045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504",
9447 "30440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502",
9448 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff02000000000000000001f1090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504014730440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
// commitment tx with five outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2194;
9455 test_commitment!("304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb3",
9456 "3044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d398",
9457 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48440966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d3980147304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9460 "3045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de8450",
9461 "304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e0154301",
9462 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de84500148304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e015430101008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9465 "3044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a0",
9466 "3045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b77",
9467 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd010000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a001483045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b7701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9470 "3045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b",
9471 "30450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3",
9472 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd020000000000000000019a090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b014830450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
// commitment tx with four outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2195;
9479 test_commitment!("304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce403",
9480 "3044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d1767",
9481 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484b8976a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d17670147304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce40301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9484 "3045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e",
9485 "3045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d76",
9486 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900000000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e01483045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d7601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9489 "3045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a",
9490 "3044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82",
9491 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900100000000000000000199090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a01473044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
// anchors: commitment tx with four outputs untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2185;
chan.context.holder_dust_limit_satoshis = 2001;
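// Cache the current (non-anchors) channel type so the anchors variants below can switch to
// anchors_zero_htlc_fee_and_dependencies and restore the original type afterwards.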
let cached_channel_type = chan.context.channel_type;
chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9501 test_commitment_with_anchors!("3044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c0",
9502 "3045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd5",
9503 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80064a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994b80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ac5916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd501473044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9506 "304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb",
9507 "30440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f",
9508 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc02000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb834730440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
9511 "3045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd7271",
9512 "3045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688",
9513 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc03000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd727183483045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
// commitment tx with four outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 3702;
chan.context.holder_dust_limit_satoshis = 546;
chan.context.channel_type = cached_channel_type.clone();
9522 test_commitment!("304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee40169",
9523 "3045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf75",
9524 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4846f916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf750148304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee4016901475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9527 "304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb89",
9528 "304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f",
9529 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb890147304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9532 "3044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb5817",
9533 "304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc",
9534 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0100000000000000000176050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb58170147304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
// commitment tx with three outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 3703;
9541 test_commitment!("3045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e",
9542 "304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e1",
9543 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484eb936a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e101483045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9546 "3045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a",
9547 "3045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05",
9548 "0200000000010120060e4a29579d429f0f27c17ee5f1ee282f20d706d6f90b63d35946d8f3029a0000000000000000000175050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a01483045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
// anchors: commitment tx with three outputs untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 3687;
chan.context.holder_dust_limit_satoshis = 3001;
chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9557 test_commitment_with_anchors!("3045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c228377",
9558 "3045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d",
9559 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80054a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aa28b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d01483045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c22837701475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9562 "3044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c",
9563 "3045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de",
9564 "02000000000101542562b326c08e3a076d9cfca2be175041366591da334d8d513ff1686fd95a6002000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c83483045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
// commitment tx with three outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 4914;
chan.context.holder_dust_limit_satoshis = 546;
chan.context.channel_type = cached_channel_type.clone();
9573 test_commitment!("3045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c95244",
9574 "3045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c19",
9575 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484ae8f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c1901483045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c9524401475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9578 "3045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374",
9579 "30440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10",
9580 "02000000000101a9172908eace869cc35128c31fc2ab502f72e4dff31aab23e0244c4b04b11ab00000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374014730440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
// commitment tx with two outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 4915;
chan.context.holder_dust_limit_satoshis = 546;
9588 test_commitment!("304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a720",
9589 "30450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf5",
9590 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484fa926a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf50147304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a72001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
// anchors: commitment tx with two outputs untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 4894;
chan.context.holder_dust_limit_satoshis = 4001;
chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9598 test_commitment_with_anchors!("3045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b95",
9599 "30450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd",
9600 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ad0886a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd01483045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b9501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
// commitment tx with two outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 9651180;
chan.context.holder_dust_limit_satoshis = 546;
chan.context.channel_type = cached_channel_type.clone();
9608 test_commitment!("304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd3",
9609 "3045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de",
9610 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b800222020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80ec0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4840400483045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de0147304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
// commitment tx with one output untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 9651181;
9616 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
9617 "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
9618 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});

// anchors: commitment tx with one output untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 6216010;
chan.context.holder_dust_limit_satoshis = 4001;
chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();

test_commitment_with_anchors!("30450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf",
"30450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1",
9628 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a04004830450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1014830450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});

// commitment tx with fee greater than funder amount
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 9651936;
chan.context.holder_dust_limit_satoshis = 546;
chan.context.channel_type = cached_channel_type;

test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
"304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
9638 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});

// commitment tx with 3 htlc outputs, 2 offered having the same amount and preimage
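// The two offered HTLCs (5000000 and 5000001 msat) round to the same 5000-sat output value
// and share a payment hash, so their commitment outputs are byte-identical; BOLT 3 breaks
// that ordering tie by cltv_expiry, which is the behaviour this vector exercises.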
chan.context.value_to_self_msat = 7_000_000_000 - 2_000_000;
chan.context.feerate_per_kw = 253;
chan.context.pending_inbound_htlcs.clear();
chan.context.pending_inbound_htlcs.push({
let mut out = InboundHTLCOutput{
amount_msat: 2000000,
payment_hash: PaymentHash([0; 32]),
state: InboundHTLCState::Committed,
};
out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
out
});

chan.context.pending_outbound_htlcs.clear();
chan.context.pending_outbound_htlcs.push({
let mut out = OutboundHTLCOutput{
amount_msat: 5000001,
payment_hash: PaymentHash([0; 32]),
state: OutboundHTLCState::Committed,
source: HTLCSource::dummy(),
skimmed_fee_msat: None,
blinding_point: None,
};
out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
out
});
chan.context.pending_outbound_htlcs.push({
let mut out = OutboundHTLCOutput{
amount_msat: 5000000,
payment_hash: PaymentHash([0; 32]),
state: OutboundHTLCState::Committed,
source: HTLCSource::dummy(),
skimmed_fee_msat: None,
blinding_point: None,
};
out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
out
});

test_commitment!("304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c8",
"304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c",
9687 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2d8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121b8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121bc0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484a69f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c0147304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9690 "3045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce5",
9691 "3044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b",
9692 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec000000000000000000011f070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce501473044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9694 "3045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a",
9695 "3045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d",
9696 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec01000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a01483045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868f9010000" },
9698 "30440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc",
9699 "304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb",
9700 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec02000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc0147304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" }

chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
test_commitment_with_anchors!("3044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c725",
"3045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b76",
9706 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aad9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b7601473044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c72501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9709 "30440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c2",
9710 "304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424",
9711 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c402000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c28347304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
9713 "304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c",
9714 "304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b",
9715 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40300000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c8347304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568f9010000" },
9717 "3045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a1",
9718 "3045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b",
9719 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40400000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a183483045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568fa010000" }

fn test_per_commitment_secret_gen() {
// Test vectors from BOLT 3 Appendix D:
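// As a reading aid, a minimal sketch of the derivation those vectors exercise: BOLT 3
// walks the 48 index bits from most- to least-significant and, for every set bit, flips
// the matching bit of the running value and SHA256s it. The helper name below is
// illustrative only; the implementation actually under test is
// `chan_utils::build_commitment_secret`.
#[allow(dead_code)]
fn bolt3_per_commitment_secret_sketch(seed: &[u8; 32], idx: u64) -> [u8; 32] {
	let mut res = *seed;
	for bitpos in (0..48usize).rev() {
		if idx & (1u64 << bitpos) != 0 {
			// Flip bit `bitpos` of the running value, then hash the result.
			res[bitpos / 8] ^= 1 << (bitpos & 7);
			res = Sha256::hash(&res).to_byte_array();
		}
	}
	res
}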
let mut seed = [0; 32];
seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap());
assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
<Vec<u8>>::from_hex("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap()[..]);

seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap());
assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
<Vec<u8>>::from_hex("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()[..]);

assert_eq!(chan_utils::build_commitment_secret(&seed, 0xaaaaaaaaaaa),
<Vec<u8>>::from_hex("56f4008fb007ca9acf0e15b054d5c9fd12ee06cea347914ddbaed70d1c13a528").unwrap()[..]);

assert_eq!(chan_utils::build_commitment_secret(&seed, 0x555555555555),
<Vec<u8>>::from_hex("9015daaeb06dba4ccc05b91b2f73bd54405f2be9f217fbacd3c5ac2e62327d31").unwrap()[..]);

seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap());
assert_eq!(chan_utils::build_commitment_secret(&seed, 1),
<Vec<u8>>::from_hex("915c75942a26bb3a433a8ce2cb0427c29ec6c1775cfc78328b57f6ba7bfeaa9c").unwrap()[..]);

fn test_key_derivation() {
// Test vectors from BOLT 3 Appendix E:
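// For reference, BOLT 3 tweaks each local key additively:
//   pubkey = basepoint + SHA256(per_commitment_point || basepoint) * G
// while the revocation key mixes both sides' contributions:
//   revocationpubkey = revocation_basepoint * SHA256(revocation_basepoint || per_commitment_point)
//                    + per_commitment_point * SHA256(per_commitment_point || revocation_basepoint)
// The assertions below check the private- and public-key forms of these derivations
// against the spec's vectors.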
let secp_ctx = Secp256k1::new();

let base_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f").unwrap()[..]).unwrap();
let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();

let base_point = PublicKey::from_secret_key(&secp_ctx, &base_secret);
assert_eq!(base_point.serialize()[..], <Vec<u8>>::from_hex("036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2").unwrap()[..]);

let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
assert_eq!(per_commitment_point.serialize()[..], <Vec<u8>>::from_hex("025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486").unwrap()[..]);

assert_eq!(chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret),
SecretKey::from_slice(&<Vec<u8>>::from_hex("cbced912d3b21bf196a766651e436aff192362621ce317704ea2f75d87e7be0f").unwrap()[..]).unwrap());

assert_eq!(RevocationKey::from_basepoint(&secp_ctx, &RevocationBasepoint::from(base_point), &per_commitment_point).to_public_key().serialize()[..],
<Vec<u8>>::from_hex("02916e326636d19c33f13e8c0c3a03dd157f332f3e99c317c141dd865eb01f8ff0").unwrap()[..]);

assert_eq!(chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_secret, &base_secret),
SecretKey::from_slice(&<Vec<u8>>::from_hex("d09ffff62ddb2297ab000cc85bcb4283fdeb6aa052affbc9dddcf33b61078110").unwrap()[..]).unwrap());

fn test_zero_conf_channel_type_support() {
let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
let secp_ctx = Secp256k1::new();
let seed = [42; 32];
let network = Network::Testnet;
let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
let logger = test_utils::TestLogger::new();

let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let config = UserConfig::default();
let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
channel_type_features.set_zero_conf_required();

let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
open_channel_msg.channel_type = Some(channel_type_features);
let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
let res = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
node_b_node_id, &channelmanager::provided_channel_type_features(&config),
&channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false);
assert!(res.is_ok());

fn test_supports_anchors_zero_htlc_tx_fee() {
// Tests that if both sides support and negotiate `anchors_zero_fee_htlc_tx`, it is the
// resulting `channel_type`.
let secp_ctx = Secp256k1::new();
let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
let network = Network::Testnet;
let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
let logger = test_utils::TestLogger::new();

let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

let mut config = UserConfig::default();
config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;

// It is not enough for just the initiator to signal `option_anchors_zero_fee_htlc_tx`;
// both sides need to signal it.
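// Here the counterparty's features come from a default `UserConfig`, which does not
// advertise the feature, so negotiation should fall back to `static_remote_key` alone.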
let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
&channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42,
&config, 0, 42, None
).unwrap();
assert!(!channel_a.context.channel_type.supports_anchors_zero_fee_htlc_tx());

let mut expected_channel_type = ChannelTypeFeatures::empty();
expected_channel_type.set_static_remote_key_required();
expected_channel_type.set_anchors_zero_fee_htlc_tx_required();

let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
None
).unwrap();

let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
&channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
).unwrap();

assert_eq!(channel_a.context.channel_type, expected_channel_type);
assert_eq!(channel_b.context.channel_type, expected_channel_type);

fn test_rejects_implicit_simple_anchors() {
// Tests that if `option_anchors` is being negotiated implicitly through the intersection of
// each side's `InitFeatures`, it is rejected.
let secp_ctx = Secp256k1::new();
let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
let network = Network::Testnet;
let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
let logger = test_utils::TestLogger::new();

let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

let config = UserConfig::default();

// See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
let static_remote_key_required: u64 = 1 << 12;
let simple_anchors_required: u64 = 1 << 20;
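// Bits 12 and 20 are the even ("required") bits for `static_remote_key` and the legacy
// `option_anchors` feature respectively.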
let raw_init_features = static_remote_key_required | simple_anchors_required;
let init_features_with_simple_anchors = InitFeatures::from_le_bytes(raw_init_features.to_le_bytes().to_vec());

let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
None
).unwrap();

// Set `channel_type` to `None` to force the implicit feature negotiation.
let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
open_channel_msg.channel_type = None;

// Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
// `static_remote_key`, it will fail the channel.
let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
&channelmanager::provided_channel_type_features(&config), &init_features_with_simple_anchors,
&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
);
assert!(channel_b.is_err());

fn test_rejects_simple_anchors_channel_type() {
// Tests that if `option_anchors` is being negotiated through the `channel_type` feature,
// it is rejected.
let secp_ctx = Secp256k1::new();
let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
let network = Network::Testnet;
let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
let logger = test_utils::TestLogger::new();

let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

let config = UserConfig::default();

// See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
let static_remote_key_required: u64 = 1 << 12;
let simple_anchors_required: u64 = 1 << 20;
let simple_anchors_raw_features = static_remote_key_required | simple_anchors_required;
let simple_anchors_init = InitFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
let simple_anchors_channel_type = ChannelTypeFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
assert!(!simple_anchors_init.requires_unknown_bits());
assert!(!simple_anchors_channel_type.requires_unknown_bits());

// First, we'll try to open a channel between A and B where A requests a channel type for
// the original `option_anchors` feature (non zero fee htlc tx). This should be rejected by
// B as it's not supported by LDK.
let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
None
).unwrap();

let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
open_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());

let res = InboundV1Channel::<&TestKeysInterface>::new(
&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
&channelmanager::provided_channel_type_features(&config), &simple_anchors_init,
&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
);
assert!(res.is_err());

// Then, we'll try to open another channel where A requests a channel type for
// `anchors_zero_fee_htlc_tx`. B is malicious and tries to downgrade the channel type to the
// original `option_anchors` feature, which should be rejected by A as it's not supported by
// LDK.
let mut channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
&fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init,
10000000, 100000, 42, &config, 0, 42, None
).unwrap();

let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));

let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
&channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
).unwrap();

let mut accept_channel_msg = channel_b.get_accept_channel_message();
accept_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());

let res = channel_a.accept_channel(
&accept_channel_msg, &config.channel_handshake_limits, &simple_anchors_init
);
assert!(res.is_err());

fn test_waiting_for_batch() {
let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
let logger = test_utils::TestLogger::new();
let secp_ctx = Secp256k1::new();
let seed = [42; 32];
let network = Network::Testnet;
let best_block = BestBlock::from_network(network);
let chain_hash = ChainHash::using_genesis_block(network);
let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

let mut config = UserConfig::default();
// Set trust_own_funding_0conf while ensuring we don't send channel_ready for a
// channel in a batch before all channels are ready.
config.channel_handshake_limits.trust_own_funding_0conf = true;

// Create a channel from node a to node b that will be part of batch funding.
let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(
&channelmanager::provided_init_features(&config),

let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(
&channelmanager::provided_channel_type_features(&config),
&channelmanager::provided_init_features(&config),
true, // Allow node b to send a 0conf channel_ready.

let accept_channel_msg = node_b_chan.accept_inbound_channel();
node_a_chan.accept_channel(
&accept_channel_msg,
&config.channel_handshake_limits,
&channelmanager::provided_init_features(&config),

// Fund the channel with a batch funding transaction.
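// (The transaction built below carries two funding-sized outputs; the second, empty-script
// output presumably stands in for another channel's funding output in the same batch.)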
let output_script = node_a_chan.context.get_funding_redeemscript();
let tx = Transaction {
lock_time: LockTime::ZERO,
value: 10000000, script_pubkey: output_script.clone(),
value: 10000000, script_pubkey: Builder::new().into_script(),
let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
let funding_created_msg = node_a_chan.get_funding_created(
tx.clone(), funding_outpoint, true, &&logger,
).map_err(|_| ()).unwrap();
let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(
&funding_created_msg.unwrap(),
).map_err(|_| ()).unwrap();
let node_b_updates = node_b_chan.monitor_updating_restored(

// Receive funding_signed, but the channel will be configured to hold sending channel_ready and
// broadcasting the funding transaction until the batch is ready.
let res = node_a_chan.funding_signed(
&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger,
);
let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
let node_a_updates = node_a_chan.monitor_updating_restored(

// Our channel_ready shouldn't be sent yet, even with trust_own_funding_0conf set,
// as the funding transaction depends on all channels in the batch becoming ready.
assert!(node_a_updates.channel_ready.is_none());
assert!(node_a_updates.funding_broadcastable.is_none());
assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));

// It is possible to receive a 0conf channel_ready from the remote node.
node_a_chan.channel_ready(
&node_b_updates.channel_ready.unwrap(),
node_a_chan.context.channel_state,
ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH | AwaitingChannelReadyFlags::THEIR_CHANNEL_READY)

// Clear the ChannelState::WaitingForBatch only when called by ChannelManager.
node_a_chan.set_batch_ready();
assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY));
assert!(node_a_chan.check_get_channel_ready(0).is_some());