// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.
use bitcoin::blockdata::constants::ChainHash;
use bitcoin::blockdata::script::{Script, ScriptBuf, Builder};
use bitcoin::blockdata::transaction::Transaction;
use bitcoin::sighash::EcdsaSighashType;
use bitcoin::consensus::encode;

use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::sha256d::Hash as Sha256d;
use bitcoin::hash_types::{Txid, BlockHash};

use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
use bitcoin::secp256k1::{PublicKey,SecretKey};
use bitcoin::secp256k1::{Secp256k1,ecdsa::Signature};
use bitcoin::secp256k1;
use crate::ln::{ChannelId, PaymentPreimage, PaymentHash};
use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
use crate::ln::msgs;
use crate::ln::msgs::DecodeError;
use crate::ln::script::{self, ShutdownScript};
use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT, ChannelShutdownState};
use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
use crate::ln::chan_utils;
use crate::ln::onion_utils::HTLCFailReason;
use crate::chain::BestBlock;
use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
use crate::chain::transaction::{OutPoint, TransactionData};
use crate::sign::ecdsa::{EcdsaChannelSigner, WriteableEcdsaChannelSigner};
use crate::sign::{EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
use crate::events::ClosureReason;
use crate::routing::gossip::NodeId;
use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
use crate::util::logger::{Logger, Record, WithContext};
use crate::util::errors::APIError;
use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure};
use crate::util::scid_utils::scid_from_parts;
use crate::prelude::*;
use core::{cmp,mem,fmt};
use core::convert::TryInto;
use core::ops::Deref;
#[cfg(any(test, fuzzing, debug_assertions))]
use crate::sync::Mutex;
use crate::sign::type_resolver::ChannelSignerType;

use super::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint, RevocationBasepoint};
#[cfg(test)]
pub struct ChannelValueStat {
	pub value_to_self_msat: u64,
	pub channel_value_msat: u64,
	pub channel_reserve_msat: u64,
	pub pending_outbound_htlcs_amount_msat: u64,
	pub pending_inbound_htlcs_amount_msat: u64,
	pub holding_cell_outbound_amount_msat: u64,
	pub counterparty_max_htlc_value_in_flight_msat: u64, // outgoing
	pub counterparty_dust_limit_msat: u64,
}
pub struct AvailableBalances {
	/// The amount that would go to us if we close the channel, ignoring any on-chain fees.
	pub balance_msat: u64,
	/// Total amount available for our counterparty to send to us.
	pub inbound_capacity_msat: u64,
	/// Total amount available for us to send to our counterparty.
	pub outbound_capacity_msat: u64,
	/// The maximum value we can assign to the next outbound HTLC.
	pub next_outbound_htlc_limit_msat: u64,
	/// The minimum value we can assign to the next outbound HTLC.
	pub next_outbound_htlc_minimum_msat: u64,
}
#[derive(Debug, Clone, Copy, PartialEq)]
enum FeeUpdateState {
	// Inbound states mirroring InboundHTLCState
	RemoteAnnounced,
	AwaitingRemoteRevokeToAnnounce,
	// Note that we do not have an AwaitingAnnouncedRemoteRevoke variant here as it is universally
	// handled the same as `Committed`, with the only exception in `InboundHTLCState` being the
	// distinction of when we allow ourselves to forward the HTLC. Because we aren't "forwarding"
	// the fee update anywhere, we can simply consider the fee update `Committed` immediately
	// instead of setting it to AwaitingAnnouncedRemoteRevoke.

	// Outbound state can only be `LocalAnnounced` or `Committed`.
	Outbound,
}
enum InboundHTLCRemovalReason {
	FailRelay(msgs::OnionErrorPacket),
	FailMalformed(([u8; 32], u16)),
	Fulfill(PaymentPreimage),
}
enum InboundHTLCState {
	/// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an
	/// update_add_htlc message for this HTLC.
	RemoteAnnounced(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've
	/// revoke_and_ack'd it), but the remote hasn't yet revoked their previous
	/// state (see the example below). We have not yet included this HTLC in a
	/// commitment_signed message because we are waiting on the remote's
	/// aforementioned state revocation. One reason this missing remote RAA
	/// (revoke_and_ack) blocks us from constructing a commitment_signed message
	/// is because every time we create a new "state", i.e. every time we sign a
	/// new commitment tx (see [BOLT #2]), we need a new per_commitment_point,
	/// which are provided one-at-a-time in each RAA. E.g., the last RAA they
	/// sent provided the per_commitment_point for our current commitment tx.
	/// The other reason we should not send a commitment_signed without their RAA
	/// is because their RAA serves to ACK our previous commitment_signed.
	///
	/// Here's an example of how an HTLC could come to be in this state:
	/// remote --> update_add_htlc(prev_htlc)   --> local
	/// remote --> commitment_signed(prev_htlc) --> local
	/// remote <-- revoke_and_ack               <-- local
	/// remote <-- commitment_signed(prev_htlc) <-- local
	/// [note that here, the remote does not respond with a RAA]
	/// remote --> update_add_htlc(this_htlc)   --> local
	/// remote --> commitment_signed(prev_htlc, this_htlc) --> local
	/// Now `this_htlc` will be assigned this state. It's unable to be officially
	/// accepted, i.e. included in a commitment_signed, because we're missing the
	/// RAA that provides our next per_commitment_point. The per_commitment_point
	/// is used to derive commitment keys, which are used to construct the
	/// signatures in a commitment_signed message.
	/// Implies AwaitingRemoteRevoke.
	///
	/// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
	AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've revoke_and_ack'd it).
	/// We have also included this HTLC in our latest commitment_signed and are now just waiting
	/// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the
	/// channel (before it can then get forwarded and/or removed).
	/// Implies AwaitingRemoteRevoke.
	AwaitingAnnouncedRemoteRevoke(PendingHTLCStatus),
	Committed,
	/// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we'll drop it.
	///
	/// Note that we have to keep an eye on the HTLC until we've received a broadcastable
	/// commitment transaction without it as otherwise we'll have to force-close the channel to
	/// claim it before the timeout (obviously doesn't apply to revoked HTLCs that we can't claim
	/// anyway). That said, ChannelMonitor does this for us (see
	/// ChannelMonitor::should_broadcast_holder_commitment_txn) so we actually remove the HTLC from
	/// our own local state before then, once we're sure that the next commitment_signed and
	/// ChannelMonitor::provide_latest_local_commitment_tx will not include this HTLC.
	LocalRemoved(InboundHTLCRemovalReason),
}
struct InboundHTLCOutput {
	htlc_id: u64,
	amount_msat: u64,
	cltv_expiry: u32,
	payment_hash: PaymentHash,
	state: InboundHTLCState,
}
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
enum OutboundHTLCState {
	/// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we will promote to Committed (note that they may not accept it until the next time we
	/// revoke, but we don't really care about that:
	///  * they've revoked, so worst case we can announce an old state and get our (option on)
	///    money back (though we won't), and,
	///  * we'll send them a revoke when they send a commitment_signed, and since only they're
	///    allowed to remove it, the "can only be removed once committed on both sides" requirement
	///    doesn't matter to us and it's up to them to enforce it, worst-case they jump ahead but
	///    we'll never get out of sync).
	/// Note that we Box the OnionPacket as it's rather large and we don't want to blow up
	/// OutboundHTLCOutput's size just for a temporary bit
	LocalAnnounced(Box<msgs::OnionPacket>),
	Committed,
	/// Remote removed this (outbound) HTLC. We're waiting on their commitment_signed to finalize
	/// the change (though they'll need to revoke before we fail the payment).
	RemoteRemoved(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We also have not yet removed this HTLC in a commitment_signed message, and are waiting on a
	/// remote revoke_and_ack on a previous state before we can do so.
	AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We have removed this HTLC in our latest commitment_signed and are now just waiting on a
	/// revoke_and_ack to drop completely.
	AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome),
}
#[derive(Clone)]
#[cfg_attr(test, derive(Debug, PartialEq))]
enum OutboundHTLCOutcome {
	/// LDK version 0.0.105+ will always fill in the preimage here.
	Success(Option<PaymentPreimage>),
	Failure(HTLCFailReason),
}
impl From<Option<HTLCFailReason>> for OutboundHTLCOutcome {
	fn from(o: Option<HTLCFailReason>) -> Self {
		match o {
			None => OutboundHTLCOutcome::Success(None),
			Some(r) => OutboundHTLCOutcome::Failure(r),
		}
	}
}
impl<'a> Into<Option<&'a HTLCFailReason>> for &'a OutboundHTLCOutcome {
	fn into(self) -> Option<&'a HTLCFailReason> {
		match self {
			OutboundHTLCOutcome::Success(_) => None,
			OutboundHTLCOutcome::Failure(ref r) => Some(r),
		}
	}
}
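
// Illustrative sketch (not part of upstream LDK): the reference conversion above lets a
// caller borrow the failure reason, if any, without consuming the outcome.
// `example_outcome_failure_reason` is a hypothetical helper.
#[cfg(test)]
#[allow(unused)]
fn example_outcome_failure_reason(outcome: &OutboundHTLCOutcome) -> Option<&HTLCFailReason> {
	outcome.into()
}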
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
struct OutboundHTLCOutput {
	htlc_id: u64,
	amount_msat: u64,
	cltv_expiry: u32,
	payment_hash: PaymentHash,
	state: OutboundHTLCState,
	source: HTLCSource,
	blinding_point: Option<PublicKey>,
	skimmed_fee_msat: Option<u64>,
}
/// See AwaitingRemoteRevoke ChannelState for more info
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
enum HTLCUpdateAwaitingACK {
	AddHTLC { // TODO: Time out if we're getting close to cltv_expiry
		// always outbound
		amount_msat: u64,
		cltv_expiry: u32,
		payment_hash: PaymentHash,
		source: HTLCSource,
		onion_routing_packet: msgs::OnionPacket,
		// The extra fee we're skimming off the top of this HTLC.
		skimmed_fee_msat: Option<u64>,
		blinding_point: Option<PublicKey>,
	},
	ClaimHTLC {
		payment_preimage: PaymentPreimage,
		htlc_id: u64,
	},
	FailHTLC {
		htlc_id: u64,
		err_packet: msgs::OnionErrorPacket,
	},
	FailMalformedHTLC {
		htlc_id: u64,
		failure_code: u16,
		sha256_of_onion: [u8; 32],
	},
}
macro_rules! define_state_flags {
	($flag_type_doc: expr, $flag_type: ident, [$(($flag_doc: expr, $flag: ident, $value: expr)),+], $extra_flags: expr) => {
		#[doc = $flag_type_doc]
		#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
		struct $flag_type(u32);

		impl $flag_type {
			$(
				#[doc = $flag_doc]
				const $flag: $flag_type = $flag_type($value);
			)*

			/// All flags that apply to the specified [`ChannelState`] variant.
			#[allow(unused)]
			const ALL: $flag_type = Self($(Self::$flag.0 | )* $extra_flags);

			#[allow(unused)]
			fn new() -> Self { Self(0) }

			#[allow(unused)]
			fn from_u32(flags: u32) -> Result<Self, ()> {
				if flags & !Self::ALL.0 != 0 {
					Err(())
				} else {
					Ok($flag_type(flags))
				}
			}

			#[allow(unused)]
			fn is_empty(&self) -> bool { self.0 == 0 }
			#[allow(unused)]
			fn is_set(&self, flag: Self) -> bool { *self & flag == flag }
		}

		impl core::ops::Not for $flag_type {
			type Output = Self;
			fn not(self) -> Self::Output { Self(!self.0) }
		}
		impl core::ops::BitOr for $flag_type {
			type Output = Self;
			fn bitor(self, rhs: Self) -> Self::Output { Self(self.0 | rhs.0) }
		}
		impl core::ops::BitOrAssign for $flag_type {
			fn bitor_assign(&mut self, rhs: Self) { self.0 |= rhs.0; }
		}
		impl core::ops::BitAnd for $flag_type {
			type Output = Self;
			fn bitand(self, rhs: Self) -> Self::Output { Self(self.0 & rhs.0) }
		}
		impl core::ops::BitAndAssign for $flag_type {
			fn bitand_assign(&mut self, rhs: Self) { self.0 &= rhs.0; }
		}
	};
	($flag_type_doc: expr, $flag_type: ident, $flags: tt) => {
		define_state_flags!($flag_type_doc, $flag_type, $flags, 0);
	};
	($flag_type_doc: expr, FUNDED_STATE, $flag_type: ident, $flags: tt) => {
		define_state_flags!($flag_type_doc, $flag_type, $flags, FundedStateFlags::ALL.0);
		impl core::ops::BitOr<FundedStateFlags> for $flag_type {
			type Output = Self;
			fn bitor(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 | rhs.0) }
		}
		impl core::ops::BitOrAssign<FundedStateFlags> for $flag_type {
			fn bitor_assign(&mut self, rhs: FundedStateFlags) { self.0 |= rhs.0; }
		}
		impl core::ops::BitAnd<FundedStateFlags> for $flag_type {
			type Output = Self;
			fn bitand(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 & rhs.0) }
		}
		impl core::ops::BitAndAssign<FundedStateFlags> for $flag_type {
			fn bitand_assign(&mut self, rhs: FundedStateFlags) { self.0 &= rhs.0; }
		}
		impl PartialEq<FundedStateFlags> for $flag_type {
			fn eq(&self, other: &FundedStateFlags) -> bool { self.0 == other.0 }
		}
		impl From<FundedStateFlags> for $flag_type {
			fn from(flags: FundedStateFlags) -> Self { Self(flags.0) }
		}
	};
}
/// We declare all the states/flags here together to help determine which bits are still available
/// to choose.
mod state_flags {
	pub const OUR_INIT_SENT: u32 = 1 << 0;
	pub const THEIR_INIT_SENT: u32 = 1 << 1;
	pub const FUNDING_NEGOTIATED: u32 = 1 << 2;
	pub const AWAITING_CHANNEL_READY: u32 = 1 << 3;
	pub const THEIR_CHANNEL_READY: u32 = 1 << 4;
	pub const OUR_CHANNEL_READY: u32 = 1 << 5;
	pub const CHANNEL_READY: u32 = 1 << 6;
	pub const PEER_DISCONNECTED: u32 = 1 << 7;
	pub const MONITOR_UPDATE_IN_PROGRESS: u32 = 1 << 8;
	pub const AWAITING_REMOTE_REVOKE: u32 = 1 << 9;
	pub const REMOTE_SHUTDOWN_SENT: u32 = 1 << 10;
	pub const LOCAL_SHUTDOWN_SENT: u32 = 1 << 11;
	pub const SHUTDOWN_COMPLETE: u32 = 1 << 12;
	pub const WAITING_FOR_BATCH: u32 = 1 << 13;
}
371 "Flags that apply to all [`ChannelState`] variants in which the channel is funded.",
373 ("Indicates the remote side is considered \"disconnected\" and no updates are allowed \
374 until after we've done a `channel_reestablish` dance.", PEER_DISCONNECTED, state_flags::PEER_DISCONNECTED),
375 ("Indicates the user has told us a `ChannelMonitor` update is pending async persistence \
376 somewhere and we should pause sending any outbound messages until they've managed to \
377 complete it.", MONITOR_UPDATE_IN_PROGRESS, state_flags::MONITOR_UPDATE_IN_PROGRESS),
378 ("Indicates we received a `shutdown` message from the remote end. If set, they may not add \
379 any new HTLCs to the channel, and we are expected to respond with our own `shutdown` \
380 message when possible.", REMOTE_SHUTDOWN_SENT, state_flags::REMOTE_SHUTDOWN_SENT),
381 ("Indicates we sent a `shutdown` message. At this point, we may not add any new HTLCs to \
382 the channel.", LOCAL_SHUTDOWN_SENT, state_flags::LOCAL_SHUTDOWN_SENT)
387 "Flags that only apply to [`ChannelState::NegotiatingFunding`].",
388 NegotiatingFundingFlags, [
389 ("Indicates we have (or are prepared to) send our `open_channel`/`accept_channel` message.",
390 OUR_INIT_SENT, state_flags::OUR_INIT_SENT),
391 ("Indicates we have received their `open_channel`/`accept_channel` message.",
392 THEIR_INIT_SENT, state_flags::THEIR_INIT_SENT)
397 "Flags that only apply to [`ChannelState::AwaitingChannelReady`].",
398 FUNDED_STATE, AwaitingChannelReadyFlags, [
399 ("Indicates they sent us a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
400 `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
401 THEIR_CHANNEL_READY, state_flags::THEIR_CHANNEL_READY),
402 ("Indicates we sent them a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
403 `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
404 OUR_CHANNEL_READY, state_flags::OUR_CHANNEL_READY),
405 ("Indicates the channel was funded in a batch and the broadcast of the funding transaction \
406 is being held until all channels in the batch have received `funding_signed` and have \
407 their monitors persisted.", WAITING_FOR_BATCH, state_flags::WAITING_FOR_BATCH)
412 "Flags that only apply to [`ChannelState::ChannelReady`].",
413 FUNDED_STATE, ChannelReadyFlags, [
414 ("Indicates that we have sent a `commitment_signed` but are awaiting the responding \
415 `revoke_and_ack` message. During this period, we can't generate new `commitment_signed` \
416 messages as we'd be unable to determine which HTLCs they included in their `revoke_and_ack` \
417 implicit ACK, so instead we have to hold them away temporarily to be sent later.",
418 AWAITING_REMOTE_REVOKE, state_flags::AWAITING_REMOTE_REVOKE)
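
// Illustrative sketch (not part of upstream LDK): the flag types generated above are plain
// bitmasks, so combining and testing state flags reduces to bitwise operations. A unit test
// could exercise them like this; `example_state_flag_composition` is a hypothetical helper.
#[cfg(test)]
#[allow(unused)]
fn example_state_flag_composition() {
	let mut flags = AwaitingChannelReadyFlags::THEIR_CHANNEL_READY;
	flags |= AwaitingChannelReadyFlags::OUR_CHANNEL_READY;
	assert!(flags.is_set(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY));
	assert!(flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY));
	assert!(!flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
}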
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
enum ChannelState {
	/// We are negotiating the parameters required for the channel prior to funding it.
	NegotiatingFunding(NegotiatingFundingFlags),
	/// We have sent `funding_created` and are awaiting a `funding_signed` to advance to
	/// `AwaitingChannelReady`. Note that this is nonsense for an inbound channel as we immediately generate
	/// `funding_signed` upon receipt of `funding_created`, so simply skip this state.
	FundingNegotiated,
	/// We've received/sent `funding_created` and `funding_signed` and are thus now waiting on the
	/// funding transaction to confirm.
	AwaitingChannelReady(AwaitingChannelReadyFlags),
	/// Both we and our counterparty consider the funding transaction confirmed and the channel is
	/// now operational.
	ChannelReady(ChannelReadyFlags),
	/// We've successfully negotiated a `closing_signed` dance. At this point, the `ChannelManager`
	/// is about to drop us, but we store this anyway.
	ShutdownComplete,
}
macro_rules! impl_state_flag {
	($get: ident, $set: ident, $clear: ident, $state_flag: expr, [$($state: ident),+]) => {
		#[allow(unused)]
		fn $get(&self) -> bool {
			match self {
				$(ChannelState::$state(flags) => flags.is_set($state_flag.into()),)*
				_ => false,
			}
		}
		#[allow(unused)]
		fn $set(&mut self) {
			match self {
				$(ChannelState::$state(flags) => *flags |= $state_flag,)*
				_ => debug_assert!(false, "Attempted to set flag on unexpected ChannelState"),
			}
		}
		#[allow(unused)]
		fn $clear(&mut self) {
			match self {
				$(ChannelState::$state(flags) => *flags &= !($state_flag),)*
				_ => debug_assert!(false, "Attempted to clear flag on unexpected ChannelState"),
			}
		}
	};
	($get: ident, $set: ident, $clear: ident, $state_flag: expr, FUNDED_STATES) => {
		impl_state_flag!($get, $set, $clear, $state_flag, [AwaitingChannelReady, ChannelReady]);
	};
	($get: ident, $set: ident, $clear: ident, $state_flag: expr, $state: ident) => {
		impl_state_flag!($get, $set, $clear, $state_flag, [$state]);
	};
}
impl ChannelState {
	fn from_u32(state: u32) -> Result<Self, ()> {
		match state {
			state_flags::FUNDING_NEGOTIATED => Ok(ChannelState::FundingNegotiated),
			state_flags::SHUTDOWN_COMPLETE => Ok(ChannelState::ShutdownComplete),
			val => {
				if val & state_flags::AWAITING_CHANNEL_READY == state_flags::AWAITING_CHANNEL_READY {
					AwaitingChannelReadyFlags::from_u32(val & !state_flags::AWAITING_CHANNEL_READY)
						.map(|flags| ChannelState::AwaitingChannelReady(flags))
				} else if val & state_flags::CHANNEL_READY == state_flags::CHANNEL_READY {
					ChannelReadyFlags::from_u32(val & !state_flags::CHANNEL_READY)
						.map(|flags| ChannelState::ChannelReady(flags))
				} else if let Ok(flags) = NegotiatingFundingFlags::from_u32(val) {
					Ok(ChannelState::NegotiatingFunding(flags))
				} else {
					Err(())
				}
			},
		}
	}

	fn to_u32(&self) -> u32 {
		match self {
			ChannelState::NegotiatingFunding(flags) => flags.0,
			ChannelState::FundingNegotiated => state_flags::FUNDING_NEGOTIATED,
			ChannelState::AwaitingChannelReady(flags) => state_flags::AWAITING_CHANNEL_READY | flags.0,
			ChannelState::ChannelReady(flags) => state_flags::CHANNEL_READY | flags.0,
			ChannelState::ShutdownComplete => state_flags::SHUTDOWN_COMPLETE,
		}
	}

	fn is_pre_funded_state(&self) -> bool {
		matches!(self, ChannelState::NegotiatingFunding(_)|ChannelState::FundingNegotiated)
	}

	fn is_both_sides_shutdown(&self) -> bool {
		self.is_local_shutdown_sent() && self.is_remote_shutdown_sent()
	}

	fn with_funded_state_flags_mask(&self) -> FundedStateFlags {
		match self {
			ChannelState::AwaitingChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
			ChannelState::ChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
			_ => FundedStateFlags::new(),
		}
	}

	fn should_force_holding_cell(&self) -> bool {
		match self {
			ChannelState::ChannelReady(flags) =>
				flags.is_set(ChannelReadyFlags::AWAITING_REMOTE_REVOKE) ||
					flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into()) ||
					flags.is_set(FundedStateFlags::PEER_DISCONNECTED.into()),
			_ => {
				debug_assert!(false, "The holding cell is only valid within ChannelReady");
				false
			},
		}
	}

	impl_state_flag!(is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected,
		FundedStateFlags::PEER_DISCONNECTED, FUNDED_STATES);
	impl_state_flag!(is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress,
		FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS, FUNDED_STATES);
	impl_state_flag!(is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent,
		FundedStateFlags::LOCAL_SHUTDOWN_SENT, FUNDED_STATES);
	impl_state_flag!(is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent,
		FundedStateFlags::REMOTE_SHUTDOWN_SENT, FUNDED_STATES);
	impl_state_flag!(is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready,
		AwaitingChannelReadyFlags::OUR_CHANNEL_READY, AwaitingChannelReady);
	impl_state_flag!(is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready,
		AwaitingChannelReadyFlags::THEIR_CHANNEL_READY, AwaitingChannelReady);
	impl_state_flag!(is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch,
		AwaitingChannelReadyFlags::WAITING_FOR_BATCH, AwaitingChannelReady);
	impl_state_flag!(is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke,
		ChannelReadyFlags::AWAITING_REMOTE_REVOKE, ChannelReady);
}
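
// Illustrative sketch (not part of upstream LDK): `to_u32`/`from_u32` round-trip a state by
// packing the variant's discriminant bit together with its per-variant flags.
// `example_channel_state_roundtrip` is a hypothetical helper a unit test could call.
#[cfg(test)]
#[allow(unused)]
fn example_channel_state_roundtrip() {
	let state = ChannelState::ChannelReady(ChannelReadyFlags::AWAITING_REMOTE_REVOKE);
	let packed = state.to_u32();
	assert_eq!(packed, state_flags::CHANNEL_READY | state_flags::AWAITING_REMOTE_REVOKE);
	assert_eq!(ChannelState::from_u32(packed), Ok(state));
}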
pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;

pub const DEFAULT_MAX_HTLCS: u16 = 50;

pub(crate) fn commitment_tx_base_weight(channel_type_features: &ChannelTypeFeatures) -> u64 {
	const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
	const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124;
	if channel_type_features.supports_anchors_zero_fee_htlc_tx() { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT }
}

#[cfg(not(test))]
const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
#[cfg(test)]
pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;

pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;
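
// Illustrative sketch (not part of upstream LDK): the expected weight of a commitment
// transaction is the base weight for the negotiated channel type plus one increment per
// non-dust HTLC output, e.g. 724 + 5 * 172 = 1584 weight units for a non-anchors commitment
// carrying five non-dust HTLCs. `example_commitment_tx_weight` is a hypothetical helper.
#[cfg(test)]
#[allow(unused)]
fn example_commitment_tx_weight(channel_type: &ChannelTypeFeatures, num_nondust_htlcs: u64) -> u64 {
	commitment_tx_base_weight(channel_type) + num_nondust_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC
}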
/// The percentage of the channel value `holder_max_htlc_value_in_flight_msat` used to be set to,
/// before this was made configurable. The percentage was made configurable in LDK 0.0.107,
/// although LDK 0.0.104+ enabled serialization of channels with a different value set for
/// `holder_max_htlc_value_in_flight_msat`.
pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10;

/// Maximum `funding_satoshis` value according to the BOLT #2 specification, if
/// `option_support_large_channel` (aka wumbo channels) is not supported.
/// It's 2^24 - 1.
pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;

/// Total bitcoin supply in satoshis.
pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 1_0000_0000;

/// The maximum network dust limit for standard script formats. This currently represents the
/// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire
/// transaction non-standard and thus refuses to relay it.
/// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many
/// implementations use this value for their dust limit today.
pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546;

/// The maximum channel dust limit we will accept from our counterparty.
pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS;

/// The dust limit is used for both the commitment transaction outputs as well as the closing
/// transactions. For cooperative closing transactions, we require segwit outputs, though accept
/// *any* segwit scripts, which are allowed to be up to 42 bytes in length.
/// In order to avoid having to concern ourselves with standardness during the closing process, we
/// simply require our counterparty to use a dust limit which will leave any segwit output
/// standard.
/// See <https://github.com/lightning/bolts/issues/905> for more details.
pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;

// Just a reasonable implementation-specific safe lower bound, higher than the dust limit.
pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;
/// Used to return a simple Error back to ChannelManager. Will get converted to a
/// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
/// channel_id in ChannelManager.
pub(super) enum ChannelError {
	Ignore(String),
	Warn(String),
	Close(String),
}

impl fmt::Debug for ChannelError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match self {
			&ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
			&ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
			&ChannelError::Close(ref e) => write!(f, "Close : {}", e),
		}
	}
}

impl fmt::Display for ChannelError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match self {
			&ChannelError::Ignore(ref e) => write!(f, "{}", e),
			&ChannelError::Warn(ref e) => write!(f, "{}", e),
			&ChannelError::Close(ref e) => write!(f, "{}", e),
		}
	}
}
pub(super) struct WithChannelContext<'a, L: Deref> where L::Target: Logger {
	pub logger: &'a L,
	pub peer_id: Option<PublicKey>,
	pub channel_id: Option<ChannelId>,
}

impl<'a, L: Deref> Logger for WithChannelContext<'a, L> where L::Target: Logger {
	fn log(&self, mut record: Record) {
		record.peer_id = self.peer_id;
		record.channel_id = self.channel_id;
		self.logger.log(record)
	}
}

impl<'a, 'b, L: Deref> WithChannelContext<'a, L>
where L::Target: Logger {
	pub(super) fn from<S: Deref>(logger: &'a L, context: &'b ChannelContext<S>) -> Self
	where S::Target: SignerProvider
	{
		WithChannelContext {
			logger,
			peer_id: Some(context.counterparty_node_id),
			channel_id: Some(context.channel_id),
		}
	}
}
macro_rules! secp_check {
	($res: expr, $err: expr) => {
		match $res {
			Ok(thing) => thing,
			Err(_) => return Err(ChannelError::Close($err)),
		}
	};
}
675 /// The "channel disabled" bit in channel_update must be set based on whether we are connected to
676 /// our counterparty or not. However, we don't want to announce updates right away to avoid
677 /// spamming the network with updates if the connection is flapping. Instead, we "stage" updates to
678 /// our channel_update message and track the current state here.
679 /// See implementation at [`super::channelmanager::ChannelManager::timer_tick_occurred`].
680 #[derive(Clone, Copy, PartialEq)]
681 pub(super) enum ChannelUpdateStatus {
682 /// We've announced the channel as enabled and are connected to our peer.
684 /// Our channel is no longer live, but we haven't announced the channel as disabled yet.
686 /// Our channel is live again, but we haven't announced the channel as enabled yet.
688 /// We've announced the channel as disabled.
/// We track when we sent an `AnnouncementSignatures` to our peer in a few states, described here.
#[derive(PartialEq)]
pub enum AnnouncementSigsState {
	/// We have not sent our peer an `AnnouncementSignatures` yet, or our peer disconnected since
	/// we sent the last `AnnouncementSignatures`.
	NotSent,
	/// We sent an `AnnouncementSignatures` to our peer since the last time our peer disconnected.
	/// This state never appears on disk - instead we write `NotSent`.
	MessageSent,
	/// We sent a `CommitmentSigned` after the last `AnnouncementSignatures` we sent. Because we
	/// only ever have a single `CommitmentSigned` pending at once, if we sent one after sending
	/// `AnnouncementSignatures` then we know the peer received our `AnnouncementSignatures` if
	/// they send back a `RevokeAndACK`.
	/// This state never appears on disk - instead we write `NotSent`.
	Committed,
	/// We received a `RevokeAndACK`, effectively ack-ing our `AnnouncementSignatures`, at this
	/// point we no longer need to re-send our `AnnouncementSignatures` again on reconnect.
	PeerReceived,
}
/// An enum indicating whether the local or remote side offered a given HTLC.
enum HTLCInitiator {
	LocalOffered,
	RemoteOffered,
}

/// A struct gathering stats on pending HTLCs, either inbound or outbound side.
struct HTLCStats {
	pending_htlcs: u32,
	pending_htlcs_value_msat: u64,
	on_counterparty_tx_dust_exposure_msat: u64,
	on_holder_tx_dust_exposure_msat: u64,
	holding_cell_msat: u64,
	on_holder_tx_holding_cell_htlcs_count: u32, // dust HTLCs *non*-included
}

/// A struct gathering stats on a commitment transaction, either local or remote.
struct CommitmentStats<'a> {
	tx: CommitmentTransaction, // the transaction info
	feerate_per_kw: u32, // the feerate included to build the transaction
	total_fee_sat: u64, // the total fee included in the transaction
	num_nondust_htlcs: usize, // the number of HTLC outputs (dust HTLCs *non*-included)
	htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction
	local_balance_msat: u64, // local balance before fees but considering dust limits
	remote_balance_msat: u64, // remote balance before fees but considering dust limits
	outbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
	inbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful received HTLCs since last commitment
}
/// Used when calculating whether we or the remote can afford an additional HTLC.
struct HTLCCandidate {
	amount_msat: u64,
	origin: HTLCInitiator,
}

impl HTLCCandidate {
	fn new(amount_msat: u64, origin: HTLCInitiator) -> Self {
		Self { amount_msat, origin }
	}
}
/// A return value enum for get_update_fulfill_htlc. See UpdateFulfillCommitFetch variants for
/// description.
enum UpdateFulfillFetch {
	NewClaim {
		monitor_update: ChannelMonitorUpdate,
		htlc_value_msat: u64,
		msg: Option<msgs::UpdateFulfillHTLC>,
	},
	DuplicateClaim {},
}
/// The return type of get_update_fulfill_htlc_and_commit.
pub enum UpdateFulfillCommitFetch {
	/// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
	/// it in the holding cell, or re-generated the update_fulfill message after the same claim was
	/// previously placed in the holding cell (and has since been removed).
	NewClaim {
		/// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
		monitor_update: ChannelMonitorUpdate,
		/// The value of the HTLC which was claimed, in msat.
		htlc_value_msat: u64,
	},
	/// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
	/// or has been forgotten (presumably previously claimed).
	DuplicateClaim {},
}
/// The return value of `monitor_updating_restored`.
pub(super) struct MonitorRestoreUpdates {
	pub raa: Option<msgs::RevokeAndACK>,
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub order: RAACommitmentOrder,
	pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
	pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
	pub finalized_claimed_htlcs: Vec<HTLCSource>,
	pub funding_broadcastable: Option<Transaction>,
	pub channel_ready: Option<msgs::ChannelReady>,
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
}
/// The return value of `signer_maybe_unblocked`.
#[allow(unused)]
pub(super) struct SignerResumeUpdates {
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub funding_signed: Option<msgs::FundingSigned>,
	pub channel_ready: Option<msgs::ChannelReady>,
}
/// The return value of `channel_reestablish`.
pub(super) struct ReestablishResponses {
	pub channel_ready: Option<msgs::ChannelReady>,
	pub raa: Option<msgs::RevokeAndACK>,
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub order: RAACommitmentOrder,
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
	pub shutdown_msg: Option<msgs::Shutdown>,
}
/// The result of a shutdown that should be handled.
#[must_use]
pub(crate) struct ShutdownResult {
	/// A channel monitor update to apply.
	pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
	/// A list of dropped outbound HTLCs that can safely be failed backwards immediately.
	pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
	/// An unbroadcasted batch funding transaction id. The closure of this channel should be
	/// propagated to the remainder of the batch.
	pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
	pub(crate) channel_id: ChannelId,
	pub(crate) counterparty_node_id: PublicKey,
}
/// If the majority of the channel's funds are to the fundee and the initiator holds only just
/// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
/// initiator controls the feerate, if they then go to increase the channel fee, they may have no
/// balance but the fundee is unable to send a payment as the increase in fee more than drains
/// their reserve value. Thus, neither side can send a new HTLC and the channel becomes useless.
/// Thus, before sending an HTLC when we are the initiator, we check that the feerate can increase
/// by this multiple without hitting this case, before sending.
/// This multiple is effectively the maximum feerate "jump" we expect until more HTLCs flow over
/// the channel. Sadly, there isn't really a good number for this - if we expect to have no new
/// HTLCs for days we may need this to suffice for feerate increases across days, but that may
/// leave the channel less usable as we hold a bigger reserve.
#[cfg(any(fuzzing, test))]
pub const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
#[cfg(not(any(fuzzing, test)))]
const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
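
// Illustrative sketch (not part of upstream LDK): when we are the channel initiator, an
// outbound HTLC is only considered affordable if we could still pay the commitment fee were
// the feerate to jump by the multiple above. `example_buffered_commit_tx_fee_msat` is a
// hypothetical helper mirroring that buffered fee computation.
#[cfg(test)]
#[allow(unused)]
fn example_buffered_commit_tx_fee_msat(
	feerate_per_kw: u32, num_nondust_htlcs: u64, channel_type: &ChannelTypeFeatures,
) -> u64 {
	let weight = commitment_tx_base_weight(channel_type)
		+ num_nondust_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC;
	// Fee at the buffered feerate, rounded down to whole satoshis, expressed in msat.
	feerate_per_kw as u64 * FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * weight / 1000 * 1000
}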
/// If we fail to see a funding transaction confirmed on-chain within this many blocks after the
/// channel creation on an inbound channel, we simply force-close and move on.
/// This constant is the one suggested in BOLT 2.
pub(crate) const FUNDING_CONF_DEADLINE_BLOCKS: u32 = 2016;

/// In case of a concurrent update_add_htlc proposed by our counterparty, we might
/// not have enough balance value remaining to cover the onchain cost of this new
/// HTLC weight. If this happens, our counterparty fails the reception of our
/// commitment_signed including this new HTLC due to infringement on the channel
/// reserve.
/// To prevent this case, we compute our outbound update_fee with an HTLC buffer of
/// size 2. However, if the number of concurrent update_add_htlc is higher, this still
/// leads to a channel force-close. Ultimately, this is an issue coming from the
/// design of LN state machines, allowing asynchronous updates.
pub(crate) const CONCURRENT_INBOUND_HTLC_FEE_BUFFER: u32 = 2;

/// When a channel is opened, we check that the funding amount is enough to pay for relevant
/// commitment transaction fees, with at least this many HTLCs present on the commitment
/// transaction (not counting the value of the HTLCs themselves).
pub(crate) const MIN_AFFORDABLE_HTLC_COUNT: usize = 4;
/// When a [`Channel`] has its [`ChannelConfig`] updated, its existing one is stashed for up to this
/// number of ticks to allow forwarding HTLCs by nodes that have yet to receive the new
/// ChannelUpdate prompted by the config update. This value was determined as follows:
///
///   * The expected interval between ticks (1 minute).
///   * The average convergence delay of updates across the network, i.e., ~300 seconds on average
///     for a node to see an update as seen on `<https://arxiv.org/pdf/2205.12737.pdf>`.
///   * `EXPIRE_PREV_CONFIG_TICKS` = convergence_delay / tick_interval
pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;
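
// Worked arithmetic for the constant above (a sketch; the delay and interval figures come
// from the doc comment, not from any runtime configuration):
// convergence_delay / tick_interval = 300 s / 60 s = 5 ticks.
#[cfg(test)]
#[allow(unused)]
fn example_expire_prev_config_ticks_derivation() {
	const CONVERGENCE_DELAY_SECS: usize = 300; // average network-wide update convergence
	const TICK_INTERVAL_SECS: usize = 60; // expected interval between timer ticks
	assert_eq!(CONVERGENCE_DELAY_SECS / TICK_INTERVAL_SECS, EXPIRE_PREV_CONFIG_TICKS);
}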
/// The number of ticks that may elapse while we're waiting for a response to a
/// [`msgs::RevokeAndACK`] or [`msgs::ChannelReestablish`] message before we attempt to disconnect
/// them.
///
/// See [`ChannelContext::sent_message_awaiting_response`] for more information.
pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;

/// The number of ticks that may elapse while we're waiting for an unfunded outbound/inbound channel
/// to be promoted to a [`Channel`] since the unfunded channel was created. An unfunded channel
/// exceeding this age limit will be force-closed and purged from memory.
pub(crate) const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;

/// Number of blocks needed for an output from a coinbase transaction to be spendable.
pub(crate) const COINBASE_MATURITY: u32 = 100;
struct PendingChannelMonitorUpdate {
	update: ChannelMonitorUpdate,
}

impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
	(0, update, required),
});
/// The `ChannelPhase` enum describes the current phase in life of a lightning channel with each of
/// its variants containing an appropriate channel struct.
pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
	UnfundedOutboundV1(OutboundV1Channel<SP>),
	UnfundedInboundV1(InboundV1Channel<SP>),
	Funded(Channel<SP>),
}
impl<'a, SP: Deref> ChannelPhase<SP> where
	SP::Target: SignerProvider,
	<SP::Target as SignerProvider>::EcdsaSigner: ChannelSigner,
{
	pub fn context(&'a self) -> &'a ChannelContext<SP> {
		match self {
			ChannelPhase::Funded(chan) => &chan.context,
			ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
			ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
		}
	}

	pub fn context_mut(&'a mut self) -> &'a mut ChannelContext<SP> {
		match self {
			ChannelPhase::Funded(ref mut chan) => &mut chan.context,
			ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
			ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
		}
	}
}
/// Contains all state common to unfunded inbound/outbound channels.
pub(super) struct UnfundedChannelContext {
	/// A counter tracking how many ticks have elapsed since this unfunded channel was
	/// created. If the peer has yet to respond once this unfunded channel has reached
	/// `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS`, it will be force-closed and purged from memory.
	///
	/// This is so that we don't keep channels around that haven't progressed to a funded state
	/// in a timely manner.
	unfunded_channel_age_ticks: usize,
}
impl UnfundedChannelContext {
	/// Determines whether we should force-close and purge this unfunded channel from memory due to it
	/// having reached the unfunded channel age limit.
	///
	/// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
	pub fn should_expire_unfunded_channel(&mut self) -> bool {
		self.unfunded_channel_age_ticks += 1;
		self.unfunded_channel_age_ticks >= UNFUNDED_CHANNEL_AGE_LIMIT_TICKS
	}
}
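
// Illustrative sketch (not part of upstream LDK): called once per timer tick, the helper
// above returns true only once the unfunded channel has aged past the limit.
// `example_unfunded_channel_expiry` is a hypothetical unit-test helper.
#[cfg(test)]
#[allow(unused)]
fn example_unfunded_channel_expiry() {
	let mut ctx = UnfundedChannelContext { unfunded_channel_age_ticks: 0 };
	for _ in 1..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS {
		assert!(!ctx.should_expire_unfunded_channel());
	}
	assert!(ctx.should_expire_unfunded_channel());
}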
/// Contains everything about the channel including state, and various flags.
pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
	config: LegacyChannelConfig,

	// Track the previous `ChannelConfig` so that we can continue forwarding HTLCs that were
	// constructed using it. The second element in the tuple corresponds to the number of ticks that
	// have elapsed since the update occurred.
	prev_config: Option<(ChannelConfig, usize)>,

	inbound_handshake_limits_override: Option<ChannelHandshakeLimits>,

	user_id: u128,

	/// The current channel ID.
	channel_id: ChannelId,
	/// The temporary channel ID used during channel setup. Value kept even after transitioning to a final channel ID.
	/// Will be `None` for channels created prior to 0.0.115.
	temporary_channel_id: Option<ChannelId>,
	channel_state: ChannelState,
	// When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
	// our peer. However, we want to make sure they received it, or else rebroadcast it when we
	// next connect.
	// We do so here, see `AnnouncementSigsSent` for more details on the state(s).
	// Note that a number of our tests were written prior to the behavior here which retransmits
	// AnnouncementSignatures until after an RAA completes, so the behavior is short-circuited in
	// tests.
	#[cfg(any(test, feature = "_test_utils"))]
	pub(crate) announcement_sigs_state: AnnouncementSigsState,
	#[cfg(not(any(test, feature = "_test_utils")))]
	announcement_sigs_state: AnnouncementSigsState,
	secp_ctx: Secp256k1<secp256k1::All>,
	channel_value_satoshis: u64,

	latest_monitor_update_id: u64,

	holder_signer: ChannelSignerType<SP>,
	shutdown_scriptpubkey: Option<ShutdownScript>,
	destination_script: ScriptBuf,

	// Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction
	// generation start at 0 and count up...this simplifies some parts of implementation at the
	// cost of others, but should really just be changed.

	cur_holder_commitment_transaction_number: u64,
	cur_counterparty_commitment_transaction_number: u64,
	value_to_self_msat: u64, // Excluding all pending_htlcs, fees, and anchor outputs
	pending_inbound_htlcs: Vec<InboundHTLCOutput>,
	pending_outbound_htlcs: Vec<OutboundHTLCOutput>,
	holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,
	/// When resending CS/RAA messages on channel monitor restoration or on reconnect, we always
	/// need to ensure we resend them in the order we originally generated them. Note that because
	/// there can only ever be one in-flight CS and/or one in-flight RAA at any time, it is
	/// sufficient to simply set this to the opposite of any message we are generating as we
	/// generate it. ie when we generate a CS, we set this to RAAFirst as, if there is a pending
	/// in-flight RAA to resend, it will have been the first thing we generated, and thus we should
	/// re-send it first.
	resend_order: RAACommitmentOrder,
	monitor_pending_channel_ready: bool,
	monitor_pending_revoke_and_ack: bool,
	monitor_pending_commitment_signed: bool,

	// TODO: If a channel is drop'd, we don't know whether the `ChannelMonitor` is ultimately
	// responsible for some of the HTLCs here or not - we don't know whether the update in question
	// completed or not. We currently ignore these fields entirely when force-closing a channel,
	// but need to handle this somehow or we run the risk of losing HTLCs!
	monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
	monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
	monitor_pending_finalized_fulfills: Vec<HTLCSource>,

	/// If we went to send a commitment update (ie some messages then [`msgs::CommitmentSigned`])
	/// but our signer (initially) refused to give us a signature, we should retry at some point in
	/// the future when the signer indicates it may have a signature for us.
	///
	/// This flag is set in such a case. Note that we don't need to persist this as we'll end up
	/// setting it again as a side-effect of [`Channel::channel_reestablish`].
	signer_pending_commitment_update: bool,
	/// Similar to [`Self::signer_pending_commitment_update`] but we're waiting to send either a
	/// [`msgs::FundingCreated`] or [`msgs::FundingSigned`] depending on if this channel is
	/// outbound or inbound.
	signer_pending_funding: bool,
	// pending_update_fee is filled when sending and receiving update_fee.
	//
	// Because it follows the same commitment flow as HTLCs, `FeeUpdateState` is either `Outbound`
	// or matches a subset of the `InboundHTLCState` variants. It is then updated/used when
	// generating new commitment transactions with exactly the same criteria as inbound/outbound
	// HTLCs with similar state.
	pending_update_fee: Option<(u32, FeeUpdateState)>,
	// If a `send_update_fee()` call is made with ChannelState::AwaitingRemoteRevoke set, we place
	// it here instead of `pending_update_fee` in the same way as we place outbound HTLC updates in
	// `holding_cell_htlc_updates` instead of `pending_outbound_htlcs`. It is released into
	// `pending_update_fee` with the same criteria as outbound HTLC updates but can be updated by
	// further `send_update_fee` calls, dropping the previous holding cell update entirely.
	holding_cell_update_fee: Option<u32>,
	next_holder_htlc_id: u64,
	next_counterparty_htlc_id: u64,
	feerate_per_kw: u32,

	/// The timestamp set on our latest `channel_update` message for this channel. It is updated
	/// when the channel is updated in ways which may impact the `channel_update` message or when a
	/// new block is received, ensuring it's always at least moderately close to the current real
	/// time.
	update_time_counter: u32,
	#[cfg(debug_assertions)]
	/// Max to_local and to_remote outputs in a locally-generated commitment transaction
	holder_max_commitment_tx_output: Mutex<(u64, u64)>,
	#[cfg(debug_assertions)]
	/// Max to_local and to_remote outputs in a remote-generated commitment transaction
	counterparty_max_commitment_tx_output: Mutex<(u64, u64)>,

	last_sent_closing_fee: Option<(u64, Signature)>, // (fee, holder_sig)
	target_closing_feerate_sats_per_kw: Option<u32>,

	/// If our counterparty sent us a closing_signed while we were waiting for a `ChannelMonitor`
	/// update, we need to delay processing it until later. We do that here by simply storing the
	/// closing_signed message and handling it in `maybe_propose_closing_signed`.
	pending_counterparty_closing_signed: Option<msgs::ClosingSigned>,

	/// The minimum and maximum absolute fee, in satoshis, we are willing to place on the closing
	/// transaction. These are set once we reach `closing_negotiation_ready`.
	#[cfg(test)]
	pub(crate) closing_fee_limits: Option<(u64, u64)>,
	#[cfg(not(test))]
	closing_fee_limits: Option<(u64, u64)>,

	/// If we remove an HTLC (or fee update), commit, and receive our counterparty's
	/// `revoke_and_ack`, we remove all knowledge of said HTLC (or fee update). However, the latest
	/// local commitment transaction that we can broadcast still contains the HTLC (or old fee)
	/// until we receive a further `commitment_signed`. Thus we are not eligible for initiating the
	/// `closing_signed` negotiation if we're expecting a counterparty `commitment_signed`.
	///
	/// To ensure we don't send a `closing_signed` too early, we track this state here, waiting
	/// until we see a `commitment_signed` before doing so.
	///
	/// We don't bother to persist this - we anticipate this state won't last longer than a few
	/// milliseconds, so any accidental force-closes here should be exceedingly rare.
	expecting_peer_commitment_signed: bool,
	/// The hash of the block in which the funding transaction was included.
	funding_tx_confirmed_in: Option<BlockHash>,
	funding_tx_confirmation_height: u32,
	short_channel_id: Option<u64>,
	/// Either the height at which this channel was created or the height at which it was last
	/// serialized if it was serialized by versions prior to 0.0.103.
	/// We use this to close if funding is never broadcasted.
	channel_creation_height: u32,

	counterparty_dust_limit_satoshis: u64,

	#[cfg(test)]
	pub(super) holder_dust_limit_satoshis: u64,
	#[cfg(not(test))]
	holder_dust_limit_satoshis: u64,

	#[cfg(test)]
	pub(super) counterparty_max_htlc_value_in_flight_msat: u64,
	#[cfg(not(test))]
	counterparty_max_htlc_value_in_flight_msat: u64,

	#[cfg(test)]
	pub(super) holder_max_htlc_value_in_flight_msat: u64,
	#[cfg(not(test))]
	holder_max_htlc_value_in_flight_msat: u64,

	/// minimum channel reserve for self to maintain - set by them.
	counterparty_selected_channel_reserve_satoshis: Option<u64>,

	#[cfg(test)]
	pub(super) holder_selected_channel_reserve_satoshis: u64,
	#[cfg(not(test))]
	holder_selected_channel_reserve_satoshis: u64,

	counterparty_htlc_minimum_msat: u64,
	holder_htlc_minimum_msat: u64,
	#[cfg(test)]
	pub counterparty_max_accepted_htlcs: u16,
	#[cfg(not(test))]
	counterparty_max_accepted_htlcs: u16,
	holder_max_accepted_htlcs: u16,
	minimum_depth: Option<u32>,
1133 minimum_depth: Option<u32>,
1135 counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,
1137 pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
1138 funding_transaction: Option<Transaction>,
1139 is_batch_funding: Option<()>,
1141 counterparty_cur_commitment_point: Option<PublicKey>,
1142 counterparty_prev_commitment_point: Option<PublicKey>,
1143 counterparty_node_id: PublicKey,
1145 counterparty_shutdown_scriptpubkey: Option<ScriptBuf>,
1147 commitment_secrets: CounterpartyCommitmentSecrets,
1149 channel_update_status: ChannelUpdateStatus,
1150 /// Once we reach `closing_negotiation_ready`, we set this, indicating if closing_signed does
1151 /// not complete within a single timer tick (one minute), we should force-close the channel.
1152 /// This prevents us from keeping unusable channels around forever if our counterparty wishes
1154 /// Note that this field is reset to false on deserialization to give us a chance to connect to
1155 /// our peer and start the closing_signed negotiation fresh.
1156 closing_signed_in_flight: bool,
1158 /// Our counterparty's channel_announcement signatures provided in announcement_signatures.
1159 /// This can be used to rebroadcast the channel_announcement message later.
1160 announcement_sigs: Option<(Signature, Signature)>,
	// We save these values so we can make sure `next_local_commit_tx_fee_msat` and
	// `next_remote_commit_tx_fee_msat` properly predict what the next commitment transaction fee will
	// be, by comparing the cached values to the fee of the transaction generated by
	// `build_commitment_transaction`.
	#[cfg(any(test, fuzzing))]
	next_local_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
	#[cfg(any(test, fuzzing))]
	next_remote_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
	/// lnd has a long-standing bug where, upon reconnection, if the channel is not yet confirmed
	/// they will not send a channel_reestablish until the channel locks in. Then, they will send a
	/// channel_ready *before* sending the channel_reestablish (which is clearly a violation of
	/// the BOLT specs). We copy c-lightning's workaround here and simply store the channel_ready
	/// message until we receive a channel_reestablish.
	///
	/// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
	pub workaround_lnd_bug_4006: Option<msgs::ChannelReady>,

	/// An option set when we wish to track how many ticks have elapsed while waiting for a response
	/// from our counterparty after sending a message. If the peer has yet to respond after reaching
	/// `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`, a reconnection should be attempted to try to
	/// unblock the state machine.
	///
	/// This behavior is mostly motivated by an lnd bug in which we don't receive a message we expect
	/// to in a timely manner, which may lead to channels becoming unusable and/or force-closed. An
	/// example of such can be found at <https://github.com/lightningnetwork/lnd/issues/7682>.
	///
	/// This is currently only used when waiting for a [`msgs::ChannelReestablish`] or
	/// [`msgs::RevokeAndACK`] message from the counterparty.
	sent_message_awaiting_response: Option<usize>,
	#[cfg(any(test, fuzzing))]
	// When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
	// corresponding HTLC on the inbound path. If, then, the outbound path channel is
	// disconnected and reconnected (before we've exchanged commitment_signed and revoke_and_ack
	// messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This
	// is fine, but as a sanity check in our failure to generate the second claim, we check here
	// that the original was a claim, and that we aren't now trying to fulfill a failed HTLC.
	historical_inbound_htlc_fulfills: HashSet<u64>,

	/// This channel's type, as negotiated during channel open
	channel_type: ChannelTypeFeatures,

	// Our counterparty can offer us SCID aliases which they will map to this channel when routing
	// outbound payments. These can be used in invoice route hints to avoid explicitly revealing
	// the channel's funding UTXO.
	//
	// We also use this when sending our peer a channel_update that isn't to be broadcasted
	// publicly - allowing them to re-use their map of SCID -> channel for channel_update ->
	// associated channel mapping.
	//
	// We only bother storing the most recent SCID alias at any time, though our counterparty has
	// to store all of them.
	latest_inbound_scid_alias: Option<u64>,

	// We always offer our counterparty a static SCID alias, which we recognize as for this channel
	// if we see it in HTLC forwarding instructions. We don't bother rotating the alias given we
	// don't currently support node id aliases and eventually privacy should be provided with
	// blinded paths instead of simple scid+node_id aliases.
	outbound_scid_alias: u64,

	// We track whether we already emitted a `ChannelPending` event.
	channel_pending_event_emitted: bool,

	// We track whether we already emitted a `ChannelReady` event.
	channel_ready_event_emitted: bool,

	/// The unique identifier used to re-derive the private key material for the channel through
	/// [`SignerProvider::derive_channel_signer`].
	channel_keys_id: [u8; 32],

	/// If we can't release a [`ChannelMonitorUpdate`] until some external action completes, we
	/// store it here and only release it to the `ChannelManager` once it asks for it.
	blocked_monitor_updates: Vec<PendingChannelMonitorUpdate>,
}
1238 impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
1239 /// Allowed in any state (including after shutdown)
1240 pub fn get_update_time_counter(&self) -> u32 {
1241 self.update_time_counter
1244 pub fn get_latest_monitor_update_id(&self) -> u64 {
1245 self.latest_monitor_update_id
1248 pub fn should_announce(&self) -> bool {
1249 self.config.announced_channel
1252 pub fn is_outbound(&self) -> bool {
1253 self.channel_transaction_parameters.is_outbound_from_holder
1256 /// Gets the fee we'd want to charge for adding an HTLC output to this Channel
1257 /// Allowed in any state (including after shutdown)
1258 pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 {
1259 self.config.options.forwarding_fee_base_msat
1262 /// Returns true if we've ever received a message from the remote end for this Channel
1263 pub fn have_received_message(&self) -> bool {
1264 self.channel_state > ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT)
1267 /// Returns true if this channel is fully established and not known to be closing.
1268 /// Allowed in any state (including after shutdown)
1269 pub fn is_usable(&self) -> bool {
1270 matches!(self.channel_state, ChannelState::ChannelReady(_)) &&
1271 !self.channel_state.is_local_shutdown_sent() &&
1272 !self.channel_state.is_remote_shutdown_sent() &&
1273 !self.monitor_pending_channel_ready
1276 /// shutdown state returns the state of the channel in its various stages of shutdown
1277 pub fn shutdown_state(&self) -> ChannelShutdownState {
1278 match self.channel_state {
1279 ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_) =>
1280 if self.channel_state.is_local_shutdown_sent() && !self.channel_state.is_remote_shutdown_sent() {
1281 ChannelShutdownState::ShutdownInitiated
1282 } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && !self.closing_negotiation_ready() {
1283 ChannelShutdownState::ResolvingHTLCs
1284 } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && self.closing_negotiation_ready() {
1285 ChannelShutdownState::NegotiatingClosingFee
1287 ChannelShutdownState::NotShuttingDown
1289 ChannelState::ShutdownComplete => ChannelShutdownState::ShutdownComplete,
1290 _ => ChannelShutdownState::NotShuttingDown,
1294 fn closing_negotiation_ready(&self) -> bool {
1295 let is_ready_to_close = match self.channel_state {
1296 ChannelState::AwaitingChannelReady(flags) =>
1297 flags & FundedStateFlags::ALL == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
1298 ChannelState::ChannelReady(flags) =>
1299 flags == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
1302 self.pending_inbound_htlcs.is_empty() &&
1303 self.pending_outbound_htlcs.is_empty() &&
1304 self.pending_update_fee.is_none() &&
/// Returns true if this channel is currently available for use. This is a stricter check than
/// is_usable(), additionally considering things like the channel being temporarily disabled.
/// Allowed in any state (including after shutdown)
1310 /// Allowed in any state (including after shutdown)
1311 pub fn is_live(&self) -> bool {
1312 self.is_usable() && !self.channel_state.is_peer_disconnected()
1315 // Public utilities:
1317 pub fn channel_id(&self) -> ChannelId {
1321 // Return the `temporary_channel_id` used during channel establishment.
1323 // Will return `None` for channels created prior to LDK version 0.0.115.
1324 pub fn temporary_channel_id(&self) -> Option<ChannelId> {
1325 self.temporary_channel_id
1328 pub fn minimum_depth(&self) -> Option<u32> {
1332 /// Gets the "user_id" value passed into the construction of this channel. It has no special
1333 /// meaning and exists only to allow users to have a persistent identifier of a channel.
1334 pub fn get_user_id(&self) -> u128 {
1338 /// Gets the channel's type
1339 pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
1343 /// Gets the channel's `short_channel_id`.
1345 /// Will return `None` if the channel hasn't been confirmed yet.
1346 pub fn get_short_channel_id(&self) -> Option<u64> {
1347 self.short_channel_id
1350 /// Allowed in any state (including after shutdown)
1351 pub fn latest_inbound_scid_alias(&self) -> Option<u64> {
1352 self.latest_inbound_scid_alias
1355 /// Allowed in any state (including after shutdown)
1356 pub fn outbound_scid_alias(&self) -> u64 {
1357 self.outbound_scid_alias
1360 /// Returns the holder signer for this channel.
1362 pub fn get_signer(&self) -> &ChannelSignerType<SP> {
1363 return &self.holder_signer
/// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
/// indicating we were written by LDK prior to 0.0.106 (which did not set outbound SCID
/// aliases), or prior to any channel actions during `Channel` initialization.
1369 pub fn set_outbound_scid_alias(&mut self, outbound_scid_alias: u64) {
1370 debug_assert_eq!(self.outbound_scid_alias, 0);
1371 self.outbound_scid_alias = outbound_scid_alias;
1374 /// Returns the funding_txo we either got from our peer, or were given by
1375 /// get_funding_created.
1376 pub fn get_funding_txo(&self) -> Option<OutPoint> {
1377 self.channel_transaction_parameters.funding_outpoint
1380 /// Returns the height in which our funding transaction was confirmed.
1381 pub fn get_funding_tx_confirmation_height(&self) -> Option<u32> {
1382 let conf_height = self.funding_tx_confirmation_height;
1383 if conf_height > 0 {
1390 /// Returns the block hash in which our funding transaction was confirmed.
1391 pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
1392 self.funding_tx_confirmed_in
1395 /// Returns the current number of confirmations on the funding transaction.
1396 pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
1397 if self.funding_tx_confirmation_height == 0 {
1398 // We either haven't seen any confirmation yet, or observed a reorg.
1402 height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1)
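// Editor's note: an illustrative, self-contained sketch (not upstream LDK code) of the
// confirmation arithmetic above. The count is inclusive of the confirming block, so a
// transaction confirmed at the current tip has exactly one confirmation; the heights used
// here are hypothetical.
#[cfg(test)]
#[test]
fn example_confirmation_count_is_inclusive() {
    let conf_height: u32 = 100_000; // hypothetical funding confirmation height
    let tip: u32 = 100_005;
    assert_eq!(tip.checked_sub(conf_height).map_or(0, |c| c + 1), 6);
    // If the tip is below the confirmation height (e.g. mid-reorg), we report zero.
    assert_eq!(99_999u32.checked_sub(conf_height).map_or(0, |c| c + 1), 0);
}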
1405 fn get_holder_selected_contest_delay(&self) -> u16 {
1406 self.channel_transaction_parameters.holder_selected_contest_delay
1409 fn get_holder_pubkeys(&self) -> &ChannelPublicKeys {
1410 &self.channel_transaction_parameters.holder_pubkeys
1413 pub fn get_counterparty_selected_contest_delay(&self) -> Option<u16> {
1414 self.channel_transaction_parameters.counterparty_parameters
1415 .as_ref().map(|params| params.selected_contest_delay)
1418 fn get_counterparty_pubkeys(&self) -> &ChannelPublicKeys {
1419 &self.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys
1422 /// Allowed in any state (including after shutdown)
1423 pub fn get_counterparty_node_id(&self) -> PublicKey {
1424 self.counterparty_node_id
1427 /// Allowed in any state (including after shutdown)
1428 pub fn get_holder_htlc_minimum_msat(&self) -> u64 {
1429 self.holder_htlc_minimum_msat
/// Allowed in any state (including after shutdown), but will return `None` before TheirInitSent
1433 pub fn get_holder_htlc_maximum_msat(&self) -> Option<u64> {
1434 self.get_htlc_maximum_msat(self.holder_max_htlc_value_in_flight_msat)
1437 /// Allowed in any state (including after shutdown)
1438 pub fn get_announced_htlc_max_msat(&self) -> u64 {
// Upper bound by capacity. We make it a bit less than full capacity to prevent attempts
// to use full capacity. This is an effort to reduce routing failures, because in many cases
// a channel might have been used to route very small values (either by honest users or as DoS).
1443 self.channel_value_satoshis * 1000 * 9 / 10,
1445 self.counterparty_max_htlc_value_in_flight_msat
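// Editor's note: an illustrative, self-contained sketch (not upstream LDK code) of the
// announced maximum above: the lesser of 90% of channel capacity (in msat) and the
// counterparty's max-in-flight limit, with hypothetical values.
#[cfg(test)]
#[test]
fn example_announced_htlc_max_msat() {
    let channel_value_satoshis: u64 = 1_000_000;
    let counterparty_max_htlc_value_in_flight_msat: u64 = 950_000_000;
    let announced = core::cmp::min(
        channel_value_satoshis * 1000 * 9 / 10,
        counterparty_max_htlc_value_in_flight_msat,
    );
    // 90% of the 1,000,000,000 msat capacity is the binding constraint here.
    assert_eq!(announced, 900_000_000);
}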
1449 /// Allowed in any state (including after shutdown)
1450 pub fn get_counterparty_htlc_minimum_msat(&self) -> u64 {
1451 self.counterparty_htlc_minimum_msat
/// Allowed in any state (including after shutdown), but will return `None` before TheirInitSent
1455 pub fn get_counterparty_htlc_maximum_msat(&self) -> Option<u64> {
1456 self.get_htlc_maximum_msat(self.counterparty_max_htlc_value_in_flight_msat)
1459 fn get_htlc_maximum_msat(&self, party_max_htlc_value_in_flight_msat: u64) -> Option<u64> {
1460 self.counterparty_selected_channel_reserve_satoshis.map(|counterparty_reserve| {
1461 let holder_reserve = self.holder_selected_channel_reserve_satoshis;
1463 (self.channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000,
1464 party_max_htlc_value_in_flight_msat
1469 pub fn get_value_satoshis(&self) -> u64 {
1470 self.channel_value_satoshis
1473 pub fn get_fee_proportional_millionths(&self) -> u32 {
1474 self.config.options.forwarding_fee_proportional_millionths
1477 pub fn get_cltv_expiry_delta(&self) -> u16 {
1478 cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
1481 pub fn get_max_dust_htlc_exposure_msat<F: Deref>(&self,
1482 fee_estimator: &LowerBoundedFeeEstimator<F>) -> u64
1483 where F::Target: FeeEstimator
1485 match self.config.options.max_dust_htlc_exposure {
1486 MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => {
1487 let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(
1488 ConfirmationTarget::OnChainSweep) as u64;
1489 feerate_per_kw.saturating_mul(multiplier)
1491 MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
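// Editor's note: an illustrative, self-contained sketch (not upstream LDK code) of the
// `FeeRateMultiplier` arm above. The multiplier of 5000 is an assumption for the example,
// not a statement about the configured default.
#[cfg(test)]
#[test]
fn example_feerate_multiplier_dust_exposure() {
    let feerate_per_kw: u64 = 253; // hypothetical fee estimate, sat/kWU
    let multiplier: u64 = 5000;    // assumed multiplier
    // Higher feerates linearly raise the amount of dust we are willing to carry.
    assert_eq!(feerate_per_kw.saturating_mul(multiplier), 1_265_000);
}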
1495 /// Returns the previous [`ChannelConfig`] applied to this channel, if any.
1496 pub fn prev_config(&self) -> Option<ChannelConfig> {
1497 self.prev_config.map(|prev_config| prev_config.0)
1500 // Checks whether we should emit a `ChannelPending` event.
1501 pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
1502 self.is_funding_broadcast() && !self.channel_pending_event_emitted
1505 // Returns whether we already emitted a `ChannelPending` event.
1506 pub(crate) fn channel_pending_event_emitted(&self) -> bool {
1507 self.channel_pending_event_emitted
1510 // Remembers that we already emitted a `ChannelPending` event.
1511 pub(crate) fn set_channel_pending_event_emitted(&mut self) {
1512 self.channel_pending_event_emitted = true;
1515 // Checks whether we should emit a `ChannelReady` event.
1516 pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool {
1517 self.is_usable() && !self.channel_ready_event_emitted
1520 // Remembers that we already emitted a `ChannelReady` event.
1521 pub(crate) fn set_channel_ready_event_emitted(&mut self) {
1522 self.channel_ready_event_emitted = true;
1525 /// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once
1526 /// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will
1527 /// no longer be considered when forwarding HTLCs.
1528 pub fn maybe_expire_prev_config(&mut self) {
1529 if self.prev_config.is_none() {
1532 let prev_config = self.prev_config.as_mut().unwrap();
prev_config.1 += 1;
if prev_config.1 == EXPIRE_PREV_CONFIG_TICKS {
1535 self.prev_config = None;
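// Editor's note: an illustrative, self-contained sketch (not upstream LDK code) of the
// tick-based expiry above, with an assumed threshold of 5 ticks standing in for
// `EXPIRE_PREV_CONFIG_TICKS`.
#[cfg(test)]
#[test]
fn example_prev_config_expiry() {
    const EXPIRE_TICKS: usize = 5; // assumed threshold for illustration
    let mut prev_config: Option<(u32, usize)> = Some((42, 0)); // (config stand-in, ticks)
    for _ in 0..EXPIRE_TICKS {
        let expired = if let Some(prev) = prev_config.as_mut() {
            prev.1 += 1;
            prev.1 == EXPIRE_TICKS
        } else { false };
        if expired { prev_config = None; }
    }
    assert!(prev_config.is_none());
}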
1539 /// Returns the current [`ChannelConfig`] applied to the channel.
1540 pub fn config(&self) -> ChannelConfig {
/// Updates the channel's config. Returns a bool indicating whether the applied update
/// changed parameters that require broadcasting a new ChannelUpdate message.
1546 pub fn update_config(&mut self, config: &ChannelConfig) -> bool {
1547 let did_channel_update =
1548 self.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths ||
1549 self.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat ||
1550 self.config.options.cltv_expiry_delta != config.cltv_expiry_delta;
1551 if did_channel_update {
1552 self.prev_config = Some((self.config.options, 0));
1553 // Update the counter, which backs the ChannelUpdate timestamp, to allow the relay
1554 // policy change to propagate throughout the network.
1555 self.update_time_counter += 1;
1557 self.config.options = *config;
1561 /// Returns true if funding_signed was sent/received and the
1562 /// funding transaction has been broadcast if necessary.
1563 pub fn is_funding_broadcast(&self) -> bool {
1564 !self.channel_state.is_pre_funded_state() &&
1565 !matches!(self.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH))
1568 /// Transaction nomenclature is somewhat confusing here as there are many different cases - a
1569 /// transaction is referred to as "a's transaction" implying that a will be able to broadcast
1570 /// the transaction. Thus, b will generally be sending a signature over such a transaction to
1571 /// a, and a can revoke the transaction by providing b the relevant per_commitment_secret. As
/// such, a transaction is generally the result of b increasing the amount paid to a (or adding
/// an HTLC to it).
1574 /// @local is used only to convert relevant internal structures which refer to remote vs local
1575 /// to decide value of outputs and direction of HTLCs.
1576 /// @generated_by_local is used to determine *which* HTLCs to include - noting that the HTLC
1577 /// state may indicate that one peer has informed the other that they'd like to add an HTLC but
1578 /// have not yet committed it. Such HTLCs will only be included in transactions which are being
1579 /// generated by the peer which proposed adding the HTLCs, and thus we need to understand both
1580 /// which peer generated this transaction and "to whom" this transaction flows.
1582 fn build_commitment_transaction<L: Deref>(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> CommitmentStats
1583 where L::Target: Logger
1585 let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new();
1586 let num_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len();
1587 let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs);
1589 let broadcaster_dust_limit_satoshis = if local { self.holder_dust_limit_satoshis } else { self.counterparty_dust_limit_satoshis };
1590 let mut remote_htlc_total_msat = 0;
1591 let mut local_htlc_total_msat = 0;
1592 let mut value_to_self_msat_offset = 0;
1594 let mut feerate_per_kw = self.feerate_per_kw;
1595 if let Some((feerate, update_state)) = self.pending_update_fee {
1596 if match update_state {
1597 // Note that these match the inclusion criteria when scanning
1598 // pending_inbound_htlcs below.
1599 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); !generated_by_local },
1600 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); !generated_by_local },
1601 FeeUpdateState::Outbound => { assert!(self.is_outbound()); generated_by_local },
1603 feerate_per_kw = feerate;
1607 log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...",
1608 commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number),
1609 get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()),
1611 if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
1613 macro_rules! get_htlc_in_commitment {
1614 ($htlc: expr, $offered: expr) => {
1615 HTLCOutputInCommitment {
1617 amount_msat: $htlc.amount_msat,
1618 cltv_expiry: $htlc.cltv_expiry,
1619 payment_hash: $htlc.payment_hash,
1620 transaction_output_index: None
1625 macro_rules! add_htlc_output {
1626 ($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => {
1627 if $outbound == local { // "offered HTLC output"
1628 let htlc_in_tx = get_htlc_in_commitment!($htlc, true);
1629 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1632 feerate_per_kw as u64 * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
1634 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1635 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1636 included_non_dust_htlcs.push((htlc_in_tx, $source));
1638 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1639 included_dust_htlcs.push((htlc_in_tx, $source));
1642 let htlc_in_tx = get_htlc_in_commitment!($htlc, false);
1643 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1646 feerate_per_kw as u64 * htlc_success_tx_weight(self.get_channel_type()) / 1000
1648 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1649 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1650 included_non_dust_htlcs.push((htlc_in_tx, $source));
1652 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1653 included_dust_htlcs.push((htlc_in_tx, $source));
1659 let mut inbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
1661 for ref htlc in self.pending_inbound_htlcs.iter() {
1662 let (include, state_name) = match htlc.state {
1663 InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"),
1664 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => (!generated_by_local, "AwaitingRemoteRevokeToAnnounce"),
1665 InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => (true, "AwaitingAnnouncedRemoteRevoke"),
1666 InboundHTLCState::Committed => (true, "Committed"),
1667 InboundHTLCState::LocalRemoved(_) => (!generated_by_local, "LocalRemoved"),
1671 add_htlc_output!(htlc, false, None, state_name);
1672 remote_htlc_total_msat += htlc.amount_msat;
1674 log_trace!(logger, " ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1676 &InboundHTLCState::LocalRemoved(ref reason) => {
1677 if generated_by_local {
1678 if let &InboundHTLCRemovalReason::Fulfill(preimage) = reason {
1679 inbound_htlc_preimages.push(preimage);
1680 value_to_self_msat_offset += htlc.amount_msat as i64;
1690 let mut outbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
1692 for ref htlc in self.pending_outbound_htlcs.iter() {
1693 let (include, state_name) = match htlc.state {
1694 OutboundHTLCState::LocalAnnounced(_) => (generated_by_local, "LocalAnnounced"),
1695 OutboundHTLCState::Committed => (true, "Committed"),
1696 OutboundHTLCState::RemoteRemoved(_) => (generated_by_local, "RemoteRemoved"),
1697 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) => (generated_by_local, "AwaitingRemoteRevokeToRemove"),
1698 OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) => (false, "AwaitingRemovedRemoteRevoke"),
1701 let preimage_opt = match htlc.state {
1702 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(p)) => p,
1703 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(p)) => p,
1704 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(p)) => p,
1708 if let Some(preimage) = preimage_opt {
1709 outbound_htlc_preimages.push(preimage);
1713 add_htlc_output!(htlc, true, Some(&htlc.source), state_name);
1714 local_htlc_total_msat += htlc.amount_msat;
1716 log_trace!(logger, " ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1718 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_))|OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => {
1719 value_to_self_msat_offset -= htlc.amount_msat as i64;
1721 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(_)) => {
1722 if !generated_by_local {
1723 value_to_self_msat_offset -= htlc.amount_msat as i64;
1731 let mut value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
1732 assert!(value_to_self_msat >= 0);
1733 // Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie
1734 // AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
// "violate" their reserve value by counting those against it. Thus, we have to convert
1736 // everything to i64 before subtracting as otherwise we can overflow.
1737 let mut value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
1738 assert!(value_to_remote_msat >= 0);
1740 #[cfg(debug_assertions)]
1742 // Make sure that the to_self/to_remote is always either past the appropriate
1743 // channel_reserve *or* it is making progress towards it.
1744 let mut broadcaster_max_commitment_tx_output = if generated_by_local {
1745 self.holder_max_commitment_tx_output.lock().unwrap()
1747 self.counterparty_max_commitment_tx_output.lock().unwrap()
1749 debug_assert!(broadcaster_max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= self.counterparty_selected_channel_reserve_satoshis.unwrap() as i64);
1750 broadcaster_max_commitment_tx_output.0 = cmp::max(broadcaster_max_commitment_tx_output.0, value_to_self_msat as u64);
1751 debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= self.holder_selected_channel_reserve_satoshis as i64);
1752 broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64);
1755 let total_fee_sat = commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), &self.channel_transaction_parameters.channel_type_features);
1756 let anchors_val = if self.channel_transaction_parameters.channel_type_features.supports_anchors_zero_fee_htlc_tx() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64;
1757 let (value_to_self, value_to_remote) = if self.is_outbound() {
1758 (value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000)
1760 (value_to_self_msat / 1000, value_to_remote_msat / 1000 - anchors_val - total_fee_sat as i64)
1763 let mut value_to_a = if local { value_to_self } else { value_to_remote };
1764 let mut value_to_b = if local { value_to_remote } else { value_to_self };
1765 let (funding_pubkey_a, funding_pubkey_b) = if local {
1766 (self.get_holder_pubkeys().funding_pubkey, self.get_counterparty_pubkeys().funding_pubkey)
1768 (self.get_counterparty_pubkeys().funding_pubkey, self.get_holder_pubkeys().funding_pubkey)
1771 if value_to_a >= (broadcaster_dust_limit_satoshis as i64) {
1772 log_trace!(logger, " ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a);
1777 if value_to_b >= (broadcaster_dust_limit_satoshis as i64) {
1778 log_trace!(logger, " ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b);
1783 let num_nondust_htlcs = included_non_dust_htlcs.len();
1785 let channel_parameters =
1786 if local { self.channel_transaction_parameters.as_holder_broadcastable() }
1787 else { self.channel_transaction_parameters.as_counterparty_broadcastable() };
1788 let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number,
1795 &mut included_non_dust_htlcs,
1798 let mut htlcs_included = included_non_dust_htlcs;
1799 // The unwrap is safe, because all non-dust HTLCs have been assigned an output index
1800 htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
1801 htlcs_included.append(&mut included_dust_htlcs);
// For the stats, trim the balances to 0 msat when they fall below the broadcaster's dust
// limit (converted to msat), mirroring the dust-limit trimming applied to the outputs above.
value_to_self_msat = if value_to_self_msat < (broadcaster_dust_limit_satoshis * 1000) as i64 { 0 } else { value_to_self_msat };
value_to_remote_msat = if value_to_remote_msat < (broadcaster_dust_limit_satoshis * 1000) as i64 { 0 } else { value_to_remote_msat };
1813 local_balance_msat: value_to_self_msat as u64,
1814 remote_balance_msat: value_to_remote_msat as u64,
1815 inbound_htlc_preimages,
1816 outbound_htlc_preimages,
1821 /// Creates a set of keys for build_commitment_transaction to generate a transaction which our
1822 /// counterparty will sign (ie DO NOT send signatures over a transaction created by this to
1823 /// our counterparty!)
1824 /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
1825 /// TODO Some magic rust shit to compile-time check this?
1826 fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
1827 let per_commitment_point = self.holder_signer.as_ref().get_per_commitment_point(commitment_number, &self.secp_ctx);
1828 let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
1829 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1830 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1832 TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint)
1836 /// Creates a set of keys for build_commitment_transaction to generate a transaction which we
1837 /// will sign and send to our counterparty.
1838 /// If an Err is returned, it is a ChannelError::Close (for get_funding_created)
1839 fn build_remote_transaction_keys(&self) -> TxCreationKeys {
1840 //TODO: Ensure that the payment_key derived here ends up in the library users' wallet as we
1841 //may see payments to it!
1842 let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
1843 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1844 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1846 TxCreationKeys::derive_new(&self.secp_ctx, &self.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint)
1849 /// Gets the redeemscript for the funding transaction output (ie the funding transaction output
1850 /// pays to get_funding_redeemscript().to_v0_p2wsh()).
1851 /// Panics if called before accept_channel/InboundV1Channel::new
1852 pub fn get_funding_redeemscript(&self) -> ScriptBuf {
1853 make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
1856 fn counterparty_funding_pubkey(&self) -> &PublicKey {
1857 &self.get_counterparty_pubkeys().funding_pubkey
1860 pub fn get_feerate_sat_per_1000_weight(&self) -> u32 {
1864 pub fn get_dust_buffer_feerate(&self, outbound_feerate_update: Option<u32>) -> u32 {
1865 // When calculating our exposure to dust HTLCs, we assume that the channel feerate
// may, at any point, increase by at least 10 sat/vB (i.e. 2530 sat/kWU) or 25%,
1867 // whichever is higher. This ensures that we aren't suddenly exposed to significantly
1868 // more dust balance if the feerate increases when we have several HTLCs pending
1869 // which are near the dust limit.
1870 let mut feerate_per_kw = self.feerate_per_kw;
1871 // If there's a pending update fee, use it to ensure we aren't under-estimating
1872 // potential feerate updates coming soon.
1873 if let Some((feerate, _)) = self.pending_update_fee {
1874 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1876 if let Some(feerate) = outbound_feerate_update {
1877 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1879 cmp::max(2530, feerate_per_kw * 1250 / 1000)
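// Editor's note: an illustrative, self-contained sketch (not upstream LDK code) of the
// buffer feerate above: the larger of an absolute 2530 sat/kWU floor and a 25% bump.
#[cfg(test)]
#[test]
fn example_dust_buffer_feerate() {
    // At low feerates the absolute floor dominates...
    assert_eq!(core::cmp::max(2530u32, 253 * 1250 / 1000), 2530);
    // ...while at higher feerates the 25% proportional bump does.
    assert_eq!(core::cmp::max(2530u32, 10_000 * 1250 / 1000), 12_500);
}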
1882 /// Get forwarding information for the counterparty.
1883 pub fn counterparty_forwarding_info(&self) -> Option<CounterpartyForwardingInfo> {
1884 self.counterparty_forwarding_info.clone()
/// Returns an HTLCStats about inbound pending HTLCs
1888 fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
let context = &self;
let mut stats = HTLCStats {
1891 pending_htlcs: context.pending_inbound_htlcs.len() as u32,
1892 pending_htlcs_value_msat: 0,
1893 on_counterparty_tx_dust_exposure_msat: 0,
1894 on_holder_tx_dust_exposure_msat: 0,
1895 holding_cell_msat: 0,
1896 on_holder_tx_holding_cell_htlcs_count: 0,
1899 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1902 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1903 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1904 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1906 let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
1907 let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
1908 for ref htlc in context.pending_inbound_htlcs.iter() {
1909 stats.pending_htlcs_value_msat += htlc.amount_msat;
1910 if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
1911 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1913 if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
1914 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
/// Returns an HTLCStats about pending outbound HTLCs, *including* pending adds in our holding cell.
1921 fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
let context = &self;
let mut stats = HTLCStats {
1924 pending_htlcs: context.pending_outbound_htlcs.len() as u32,
1925 pending_htlcs_value_msat: 0,
1926 on_counterparty_tx_dust_exposure_msat: 0,
1927 on_holder_tx_dust_exposure_msat: 0,
1928 holding_cell_msat: 0,
1929 on_holder_tx_holding_cell_htlcs_count: 0,
1932 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1935 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1936 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1937 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1939 let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
1940 let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
1941 for ref htlc in context.pending_outbound_htlcs.iter() {
1942 stats.pending_htlcs_value_msat += htlc.amount_msat;
1943 if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
1944 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1946 if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
1947 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1951 for update in context.holding_cell_htlc_updates.iter() {
1952 if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
1953 stats.pending_htlcs += 1;
1954 stats.pending_htlcs_value_msat += amount_msat;
1955 stats.holding_cell_msat += amount_msat;
1956 if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
1957 stats.on_counterparty_tx_dust_exposure_msat += amount_msat;
1959 if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
1960 stats.on_holder_tx_dust_exposure_msat += amount_msat;
1962 stats.on_holder_tx_holding_cell_htlcs_count += 1;
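// Editor's note: an illustrative, self-contained sketch (not upstream LDK code) of the dust
// classification used by both stats helpers above: on non-anchor channels an HTLC is dust
// when its value cannot cover its claim transaction's fee on top of the broadcaster's dust
// limit. The 663 WU HTLC-timeout weight is an assumption for the example.
#[cfg(test)]
#[test]
fn example_dust_htlc_classification() {
    let dust_limit_satoshis: u64 = 546;
    let feerate_per_kw: u64 = 1000;
    let htlc_timeout_tx_weight: u64 = 663; // assumed weight, for illustration only
    let dust_threshold_sat = dust_limit_satoshis + feerate_per_kw * htlc_timeout_tx_weight / 1000;
    assert_eq!(dust_threshold_sat, 1209);
    // A 1,000 sat HTLC (1,000,000 msat) is dust here; a 2,000 sat HTLC is not.
    assert!(1_000_000u64 / 1000 < dust_threshold_sat);
    assert!(2_000_000u64 / 1000 >= dust_threshold_sat);
}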
1969 /// Get the available balances, see [`AvailableBalances`]'s fields for more info.
1970 /// Doesn't bother handling the
1971 /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
1972 /// corner case properly.
1973 pub fn get_available_balances<F: Deref>(&self, fee_estimator: &LowerBoundedFeeEstimator<F>)
1974 -> AvailableBalances
1975 where F::Target: FeeEstimator
1977 let context = &self;
1978 // Note that we have to handle overflow due to the above case.
1979 let inbound_stats = context.get_inbound_pending_htlc_stats(None);
1980 let outbound_stats = context.get_outbound_pending_htlc_stats(None);
1982 let mut balance_msat = context.value_to_self_msat;
1983 for ref htlc in context.pending_inbound_htlcs.iter() {
1984 if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
1985 balance_msat += htlc.amount_msat;
1988 balance_msat -= outbound_stats.pending_htlcs_value_msat;
1990 let outbound_capacity_msat = context.value_to_self_msat
1991 .saturating_sub(outbound_stats.pending_htlcs_value_msat)
1993 context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);
1995 let mut available_capacity_msat = outbound_capacity_msat;
1997 let anchor_outputs_value_msat = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1998 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
2002 if context.is_outbound() {
2003 // We should mind channel commit tx fee when computing how much of the available capacity
2004 // can be used in the next htlc. Mirrors the logic in send_htlc.
2006 // The fee depends on whether the amount we will be sending is above dust or not,
// and the answer will in turn change the amount itself, making it a circular
// dependency. This complicates the computation around dust-values, up to the one-htlc-value.
2010 let mut real_dust_limit_timeout_sat = context.holder_dust_limit_satoshis;
2011 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2012 real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000;
2015 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
2016 let mut max_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
2017 let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered);
2018 let mut min_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_dust, Some(()));
2019 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2020 max_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
2021 min_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
2024 // We will first subtract the fee as if we were above-dust. Then, if the resulting
2025 // value ends up being below dust, we have this fee available again. In that case,
2026 // match the value to right-below-dust.
2027 let mut capacity_minus_commitment_fee_msat: i64 = available_capacity_msat as i64 -
2028 max_reserved_commit_tx_fee_msat as i64 - anchor_outputs_value_msat as i64;
2029 if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 {
2030 let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
2031 debug_assert!(one_htlc_difference_msat != 0);
2032 capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64;
2033 capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat);
2034 available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64;
2036 available_capacity_msat = capacity_minus_commitment_fee_msat as u64;
2039 // If the channel is inbound (i.e. counterparty pays the fee), we need to make sure
2040 // sending a new HTLC won't reduce their balance below our reserve threshold.
2041 let mut real_dust_limit_success_sat = context.counterparty_dust_limit_satoshis;
2042 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2043 real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000;
2046 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered);
2047 let max_reserved_commit_tx_fee_msat = context.next_remote_commit_tx_fee_msat(htlc_above_dust, None);
2049 let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000;
2050 let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat)
2051 .saturating_sub(inbound_stats.pending_htlcs_value_msat);
2053 if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat + anchor_outputs_value_msat {
2054 // If another HTLC's fee would reduce the remote's balance below the reserve limit
2055 // we've selected for them, we can only send dust HTLCs.
2056 available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1);
2060 let mut next_outbound_htlc_minimum_msat = context.counterparty_htlc_minimum_msat;
2062 // If we get close to our maximum dust exposure, we end up in a situation where we can send
2063 // between zero and the remaining dust exposure limit remaining OR above the dust limit.
2064 // Because we cannot express this as a simple min/max, we prefer to tell the user they can
2065 // send above the dust limit (as the router can always overpay to meet the dust limit).
2066 let mut remaining_msat_below_dust_exposure_limit = None;
2067 let mut dust_exposure_dust_limit_msat = 0;
2068 let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(fee_estimator);
2070 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2071 (context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
2073 let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64;
2074 (context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2075 context.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2077 let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
2078 if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
2079 remaining_msat_below_dust_exposure_limit =
2080 Some(max_dust_htlc_exposure_msat.saturating_sub(on_counterparty_dust_htlc_exposure_msat));
2081 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
2084 let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
2085 if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
2086 remaining_msat_below_dust_exposure_limit = Some(cmp::min(
2087 remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
2088 max_dust_htlc_exposure_msat.saturating_sub(on_holder_dust_htlc_exposure_msat)));
2089 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
2092 if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit {
2093 if available_capacity_msat < dust_exposure_dust_limit_msat {
2094 available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat);
2096 next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat);
2100 available_capacity_msat = cmp::min(available_capacity_msat,
2101 context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat);
2103 if outbound_stats.pending_htlcs + 1 > context.counterparty_max_accepted_htlcs as u32 {
2104 available_capacity_msat = 0;
2108 inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000
2109 - context.value_to_self_msat as i64
2110 - context.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
2111 - context.holder_selected_channel_reserve_satoshis as i64 * 1000,
2113 outbound_capacity_msat,
2114 next_outbound_htlc_limit_msat: available_capacity_msat,
2115 next_outbound_htlc_minimum_msat,
2120 pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
2121 let context = &self;
2122 (context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis)
2125 /// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the
2126 /// number of pending HTLCs that are on track to be in our next commitment tx.
2128 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
2129 /// `fee_spike_buffer_htlc` is `Some`.
2131 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
2132 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
2134 /// Dust HTLCs are excluded.
2135 fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
2136 let context = &self;
2137 assert!(context.is_outbound());
2139 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2142 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2143 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2145 let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
2146 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
2148 let mut addl_htlcs = 0;
2149 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
2151 HTLCInitiator::LocalOffered => {
2152 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
2156 HTLCInitiator::RemoteOffered => {
2157 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
2163 let mut included_htlcs = 0;
2164 for ref htlc in context.pending_inbound_htlcs.iter() {
2165 if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
2168 // We include LocalRemoved HTLCs here because we may still need to broadcast a commitment
2169 // transaction including this HTLC if it times out before they RAA.
2170 included_htlcs += 1;
2173 for ref htlc in context.pending_outbound_htlcs.iter() {
2174 if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
2178 OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1,
2179 OutboundHTLCState::Committed => included_htlcs += 1,
2180 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
2181 // We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment
2182 // transaction won't be generated until they send us their next RAA, which will mean
2183 // dropping any HTLCs in this state.
2188 for htlc in context.holding_cell_htlc_updates.iter() {
2190 &HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => {
2191 if amount_msat / 1000 < real_dust_limit_timeout_sat {
2196 _ => {}, // Don't include claims/fails that are awaiting ack, because once we get the
2197 // ack we're guaranteed to never include them in commitment txs anymore.
2201 let num_htlcs = included_htlcs + addl_htlcs;
2202 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
2203 #[cfg(any(test, fuzzing))]
2206 if fee_spike_buffer_htlc.is_some() {
2207 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
2209 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len()
2210 + context.holding_cell_htlc_updates.len();
2211 let commitment_tx_info = CommitmentTxInfoCached {
2213 total_pending_htlcs,
2214 next_holder_htlc_id: match htlc.origin {
2215 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
2216 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
2218 next_counterparty_htlc_id: match htlc.origin {
2219 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
2220 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
2222 feerate: context.feerate_per_kw,
2224 *context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
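// Editor's note: an illustrative, self-contained sketch (not upstream LDK code) of the fee
// spike buffer described above: the fee is computed as though one extra non-dust HTLC were
// present and, on non-anchor channels, the result is then doubled. The weights and the
// multiplier of 2 are assumptions for the example.
#[cfg(test)]
#[test]
fn example_fee_spike_buffer_reservation() {
    const BASE_WEIGHT: u64 = 724;     // assumed non-anchor commitment base weight
    const WEIGHT_PER_HTLC: u64 = 172; // assumed weight per non-dust HTLC output
    let commit_fee_msat = |feerate_per_kw: u64, num_htlcs: u64|
        (BASE_WEIGHT + num_htlcs * WEIGHT_PER_HTLC) * feerate_per_kw / 1000 * 1000;
    let feerate: u64 = 1000;
    // Two real HTLCs plus one buffer HTLC, doubled against fee spikes.
    let reserved = commit_fee_msat(feerate, 2 + 1) * 2;
    assert_eq!(reserved, 2_480_000);
}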
2229 /// Get the commitment tx fee for the remote's next commitment transaction based on the number of
2230 /// pending HTLCs that are on track to be in their next commitment tx
2232 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
2233 /// `fee_spike_buffer_htlc` is `Some`.
2235 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
2236 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
2238 /// Dust HTLCs are excluded.
2239 fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
2240 let context = &self;
2241 assert!(!context.is_outbound());
2243 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2246 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2247 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2249 let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
2250 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
2252 let mut addl_htlcs = 0;
2253 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
2255 HTLCInitiator::LocalOffered => {
2256 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
2260 HTLCInitiator::RemoteOffered => {
2261 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
2267 // When calculating the set of HTLCs which will be included in their next commitment_signed, all
2268 // non-dust inbound HTLCs are included (as all states imply it will be included) and only
2269 // committed outbound HTLCs, see below.
2270 let mut included_htlcs = 0;
2271 for ref htlc in context.pending_inbound_htlcs.iter() {
2272 if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat {
2275 included_htlcs += 1;
2278 for ref htlc in context.pending_outbound_htlcs.iter() {
2279 if htlc.amount_msat / 1000 <= real_dust_limit_success_sat {
2282 // We only include outbound HTLCs if it will not be included in their next commitment_signed,
2283 // i.e. if they've responded to us with an RAA after announcement.
2285 OutboundHTLCState::Committed => included_htlcs += 1,
2286 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
2287 OutboundHTLCState::LocalAnnounced { .. } => included_htlcs += 1,
2292 let num_htlcs = included_htlcs + addl_htlcs;
2293 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
2294 #[cfg(any(test, fuzzing))]
2297 if fee_spike_buffer_htlc.is_some() {
2298 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
2300 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len();
2301 let commitment_tx_info = CommitmentTxInfoCached {
2303 total_pending_htlcs,
2304 next_holder_htlc_id: match htlc.origin {
2305 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
2306 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
2308 next_counterparty_htlc_id: match htlc.origin {
2309 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
2310 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
2312 feerate: context.feerate_per_kw,
2314 *context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
2319 fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O>
2320 where F: Fn() -> Option<O> {
2321 match self.channel_state {
2322 ChannelState::FundingNegotiated => f(),
2323 ChannelState::AwaitingChannelReady(flags) => if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) {
/// Returns the transaction if there is a pending funding transaction that is yet to be
/// broadcast.
2334 pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
2335 self.if_unbroadcasted_funding(|| self.funding_transaction.clone())
/// Returns the transaction ID if there is a pending funding transaction that is yet to be
/// broadcast.
2340 pub fn unbroadcasted_funding_txid(&self) -> Option<Txid> {
2341 self.if_unbroadcasted_funding(||
2342 self.channel_transaction_parameters.funding_outpoint.map(|txo| txo.txid)
2346 /// Returns whether the channel is funded in a batch.
2347 pub fn is_batch_funding(&self) -> bool {
2348 self.is_batch_funding.is_some()
/// Returns the transaction ID if there is a pending batch funding transaction that is yet to be
/// broadcast.
2353 pub fn unbroadcasted_batch_funding_txid(&self) -> Option<Txid> {
2354 self.unbroadcasted_funding_txid().filter(|_| self.is_batch_funding())
2357 /// Gets the latest commitment transaction and any dependent transactions for relay (forcing
2358 /// shutdown of this channel - no more calls into this Channel may be made afterwards except
2359 /// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
/// Also returns the list of payment_hashes for HTLCs which we can safely fail backwards
/// immediately (others we will have to allow to time out).
2362 pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult {
2363 // Note that we MUST only generate a monitor update that indicates force-closure - we're
2364 // called during initialization prior to the chain_monitor in the encompassing ChannelManager
// being fully configured in some cases. Thus, it's likely any monitor events we generate will
2366 // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
2367 assert!(!matches!(self.channel_state, ChannelState::ShutdownComplete));
2369 // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
2370 // return them to fail the payment.
2371 let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
2372 let counterparty_node_id = self.get_counterparty_node_id();
2373 for htlc_update in self.holding_cell_htlc_updates.drain(..) {
2375 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
2376 dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
2381 let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
2382 // If we haven't yet exchanged funding signatures (ie channel_state < AwaitingChannelReady),
2383 // returning a channel monitor update here would imply a channel monitor update before
2384 // we even registered the channel monitor to begin with, which is invalid.
2385 // Thus, if we aren't actually at a point where we could conceivably broadcast the
2386 // funding transaction, don't return a funding txo (which prevents providing the
2387 // monitor update to the user, even if we return one).
2388 // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
2389 let generate_monitor_update = match self.channel_state {
2390 ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)|ChannelState::ShutdownComplete => true,
2393 if generate_monitor_update {
2394 self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
2395 Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
2396 update_id: self.latest_monitor_update_id,
2397 updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
2401 let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();
2403 self.channel_state = ChannelState::ShutdownComplete;
2404 self.update_time_counter += 1;
2407 dropped_outbound_htlcs,
2408 unbroadcasted_batch_funding_txid,
2409 channel_id: self.channel_id,
2410 counterparty_node_id: self.counterparty_node_id,
2414 /// Only allowed after [`Self::channel_transaction_parameters`] is set.
2415 fn get_funding_signed_msg<L: Deref>(&mut self, logger: &L) -> (CommitmentTransaction, Option<msgs::FundingSigned>) where L::Target: Logger {
2416 let counterparty_keys = self.build_remote_transaction_keys();
2417 let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number + 1, &counterparty_keys, false, false, logger).tx;
2419 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
2420 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
2421 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
2422 &self.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
2424 match &self.holder_signer {
2425 // TODO (arik): move match into calling method for Taproot
2426 ChannelSignerType::Ecdsa(ecdsa) => {
2427 let funding_signed = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.secp_ctx)
2428 .map(|(signature, _)| msgs::FundingSigned {
2429 channel_id: self.channel_id(),
2432 partial_signature_with_nonce: None,
2436 if funding_signed.is_none() {
2437 log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
2438 self.signer_pending_funding = true;
2439 } else if self.signer_pending_funding {
2440 log_trace!(logger, "Counterparty commitment signature available for funding_signed message; clearing signer_pending_funding");
2441 self.signer_pending_funding = false;
// We sign the "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
2445 (counterparty_initial_commitment_tx, funding_signed)
2447 // TODO (taproot|arik)
2454 // Internal utility functions for channels
2456 /// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
2457 /// `channel_value_satoshis` in msat, set through
2458 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
2460 /// The effective percentage is lower bounded by 1% and upper bounded by 100%.
2462 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
2463 fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
2464 let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
2466 } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
2469 config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
2471 channel_value_satoshis * 10 * configured_percent
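// Editor's note: an illustrative, self-contained sketch (not upstream LDK code) showing why
// the expression above works: `value_sat * 10 * percent` equals `percent`% of the channel
// value in msat, since value_sat * 1000 * percent / 100 == value_sat * 10 * percent.
#[cfg(test)]
#[test]
fn example_max_in_flight_percent() {
    let channel_value_satoshis: u64 = 1_000_000;
    let configured_percent: u64 = 10; // already clamped to the 1..=100 range above
    assert_eq!(
        channel_value_satoshis * 10 * configured_percent,
        channel_value_satoshis * 1000 * configured_percent / 100,
    );
    assert_eq!(channel_value_satoshis * 10 * configured_percent, 100_000_000); // 10% in msat
}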
2474 /// Returns a minimum channel reserve value the remote needs to maintain,
2475 /// required by us according to the configured or default
2476 /// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
2478 /// Guaranteed to return a value no larger than channel_value_satoshis
2480 /// This is used both for outbound and inbound channels and has lower bound
2481 /// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
2482 pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
2483 let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
2484 cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
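// Editor's note: an illustrative, self-contained sketch (not upstream LDK code) of the
// reserve calculation above, assuming a 1,000 sat stand-in for
// `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
#[cfg(test)]
#[test]
fn example_holder_selected_reserve() {
    const MIN_RESERVE_SAT: u64 = 1_000; // assumed floor, for illustration only
    let reserve = |value_sat: u64, proportional_millionths: u64| {
        let calculated = value_sat.saturating_mul(proportional_millionths) / 1_000_000;
        core::cmp::min(value_sat, core::cmp::max(calculated, MIN_RESERVE_SAT))
    };
    assert_eq!(reserve(1_000_000, 10_000), 10_000); // 1% of a 1,000,000 sat channel
    assert_eq!(reserve(5_000, 10_000), 1_000);      // floored at the minimum reserve
    assert_eq!(reserve(500, 10_000), 500);          // capped at the full channel value
}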
/// This is for legacy reasons, present for forward-compatibility.
/// LDK versions older than 0.0.104 don't know how to read/handle values other than the
/// default from storage. Hence, we use this function to avoid persisting default values of
/// `holder_selected_channel_reserve_satoshis` for channels into storage.
2491 pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
2492 let (q, _) = channel_value_satoshis.overflowing_div(100);
2493 cmp::min(channel_value_satoshis, cmp::max(q, 1000))
2496 // Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
2497 // Note that num_htlcs should not include dust HTLCs.
2499 fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2500 feerate_per_kw as u64 * (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
2503 // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs.
2504 // Note that num_htlcs should not include dust HTLCs.
2505 pub(crate) fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2506 // Note that we need to divide before multiplying to round properly,
2507 // since the lowest denomination of bitcoin on-chain is the satoshi.
2508 (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
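// Editor's note: an illustrative, self-contained sketch (not upstream LDK code) of the
// divide-before-multiply rounding above, using assumed non-anchor weights of 724 WU base
// and 172 WU per HTLC.
#[cfg(test)]
#[test]
fn example_commit_tx_fee_msat_rounding() {
    const BASE_WEIGHT: u64 = 724;     // assumed commitment tx base weight
    const WEIGHT_PER_HTLC: u64 = 172; // assumed weight per non-dust HTLC
    let feerate_per_kw: u64 = 253;
    let num_htlcs: u64 = 2;
    let fee_msat = (BASE_WEIGHT + num_htlcs * WEIGHT_PER_HTLC) * feerate_per_kw / 1000 * 1000;
    // (724 + 344) * 253 = 270,204 rounds down to 270 sat, i.e. 270,000 msat.
    assert_eq!(fee_msat, 270_000);
    assert_eq!(fee_msat % 1000, 0); // always a whole number of sats
}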
2511 // Holder designates channel data owned for the benefit of the user client.
// Counterparty designates channel data owned by the other channel participant entity.
2513 pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
2514 pub context: ChannelContext<SP>,
2517 #[cfg(any(test, fuzzing))]
2518 struct CommitmentTxInfoCached {
2520 total_pending_htlcs: usize,
2521 next_holder_htlc_id: u64,
2522 next_counterparty_htlc_id: u64,
2526 /// Contents of a wire message that fails an HTLC backwards. Useful for [`Channel::fail_htlc`] to
2527 /// fail with either [`msgs::UpdateFailMalformedHTLC`] or [`msgs::UpdateFailHTLC`] as needed.
2528 trait FailHTLCContents {
2529 type Message: FailHTLCMessageName;
2530 fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message;
2531 fn to_inbound_htlc_state(self) -> InboundHTLCState;
2532 fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK;
2534 impl FailHTLCContents for msgs::OnionErrorPacket {
2535 type Message = msgs::UpdateFailHTLC;
2536 fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
2537 msgs::UpdateFailHTLC { htlc_id, channel_id, reason: self }
2539 fn to_inbound_htlc_state(self) -> InboundHTLCState {
2540 InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(self))
2542 fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
2543 HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet: self }
2546 impl FailHTLCContents for ([u8; 32], u16) {
type Message = msgs::UpdateFailMalformedHTLC; // (sha256_of_onion, failure_code)
2548 fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
msgs::UpdateFailMalformedHTLC {
	htlc_id,
	channel_id,
2552 sha256_of_onion: self.0,
2553 failure_code: self.1
2556 fn to_inbound_htlc_state(self) -> InboundHTLCState {
2557 InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed(self))
2559 fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
HTLCUpdateAwaitingACK::FailMalformedHTLC {
	htlc_id,
2562 sha256_of_onion: self.0,
2563 failure_code: self.1
2568 trait FailHTLCMessageName {
2569 fn name() -> &'static str;
2571 impl FailHTLCMessageName for msgs::UpdateFailHTLC {
fn name() -> &'static str {
	"update_fail_htlc"
}
2576 impl FailHTLCMessageName for msgs::UpdateFailMalformedHTLC {
2577 fn name() -> &'static str {
2578 "update_fail_malformed_htlc"
2582 impl<SP: Deref> Channel<SP> where
2583 SP::Target: SignerProvider,
2584 <SP::Target as SignerProvider>::EcdsaSigner: WriteableEcdsaChannelSigner
2586 fn check_remote_fee<F: Deref, L: Deref>(
2587 channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator<F>,
2588 feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L
2589 ) -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
2591 let lower_limit_conf_target = if channel_type.supports_anchors_zero_fee_htlc_tx() {
ConfirmationTarget::MinAllowedAnchorChannelRemoteFee
} else {
	ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee
};
2596 let lower_limit = fee_estimator.bounded_sat_per_1000_weight(lower_limit_conf_target);
2597 if feerate_per_kw < lower_limit {
2598 if let Some(cur_feerate) = cur_feerate_per_kw {
if feerate_per_kw > cur_feerate {
	log_warn!(logger,
		"Accepting feerate that may prevent us from closing this channel because it's higher than what we have now. Had {} s/kW, now {} s/kW.",
		cur_feerate, feerate_per_kw);
	return Ok(());
}
return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit)));
}
Ok(())
}
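// Behavior sketch (illustrative numbers): if our estimator's lower bound is 253 sat/kW, a
// peer feerate of 200 sat/kW is rejected with the Close error above -- unless we had
// previously accepted an even lower 150 sat/kW, in which case 200 sat/kW moves toward our
// bound and is tolerated with only a warning.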
2612 fn get_closing_scriptpubkey(&self) -> ScriptBuf {
2613 // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
2614 // is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
2615 // outside of those situations will fail.
2616 self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
	let mut ret =
	(4 +  // version
	 1 +  // input count
	 36 + // prevout
	 1 +  // script length (0)
	 4 +  // sequence
	 1 +  // output count
	 4    // lock time
	)*4 + // * 4 for non-witness parts
2630 2 + // witness marker and flag
2631 1 + // witness element count
2632 4 + // 4 element lengths (2 sigs, multisig dummy, and witness script)
2633 self.context.get_funding_redeemscript().len() as u64 + // funding witness script
2634 2*(1 + 71); // two signatures + sighash type flags
2635 if let Some(spk) = a_scriptpubkey {
2636 ret += ((8+1) + // output values and script length
2637 spk.len() as u64) * 4; // scriptpubkey and witness multiplier
2639 if let Some(spk) = b_scriptpubkey {
2640 ret += ((8+1) + // output values and script length
spk.len() as u64) * 4; // scriptpubkey and witness multiplier
}
ret
}
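// Worked example (assuming a standard 71-byte 2-of-2 funding redeemscript and two 22-byte
// P2WPKH closing scripts): non-witness parts weigh (4+1+36+1+4+1+4)*4 = 204, the witness
// adds 2 + 1 + 4 + 71 + 2*(1+71) = 222, and each output adds (8+1+22)*4 = 124, giving
// 204 + 222 + 2*124 = 674 weight in total.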
2647 fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
2648 assert!(self.context.pending_inbound_htlcs.is_empty());
2649 assert!(self.context.pending_outbound_htlcs.is_empty());
2650 assert!(self.context.pending_update_fee.is_none());
2652 let mut total_fee_satoshis = proposed_total_fee_satoshis;
2653 let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 };
2654 let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 };
2656 if value_to_holder < 0 {
2657 assert!(self.context.is_outbound());
2658 total_fee_satoshis += (-value_to_holder) as u64;
2659 } else if value_to_counterparty < 0 {
2660 assert!(!self.context.is_outbound());
2661 total_fee_satoshis += (-value_to_counterparty) as u64;
2664 if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis {
2665 value_to_counterparty = 0;
2668 if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis {
2669 value_to_holder = 0;
2672 assert!(self.context.shutdown_scriptpubkey.is_some());
2673 let holder_shutdown_script = self.get_closing_scriptpubkey();
2674 let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap();
2675 let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();
2677 let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
2678 (closing_transaction, total_fee_satoshis)
2681 fn funding_outpoint(&self) -> OutPoint {
2682 self.context.channel_transaction_parameters.funding_outpoint.unwrap()
/// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
/// entirely.
2688 /// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
2689 /// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
/// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is
/// disconnected).
2693 pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
2694 (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
2695 where L::Target: Logger {
2696 // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
2697 // (see equivalent if condition there).
2698 assert!(self.context.channel_state.should_force_holding_cell());
2699 let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
2700 let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
2701 self.context.latest_monitor_update_id = mon_update_id;
2702 if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
2703 assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
2707 fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
// Either ChannelReady got set (which means it won't be unset) or there is no way any
// caller thought we could have something claimed (because we wouldn't have accepted an
// incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
// so that case is unreachable here.
2712 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
2713 panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
2716 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
2717 // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
2718 // these, but for now we just have to treat them as normal.
2720 let mut pending_idx = core::usize::MAX;
2721 let mut htlc_value_msat = 0;
2722 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
2723 if htlc.htlc_id == htlc_id_arg {
2724 debug_assert_eq!(htlc.payment_hash, PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).to_byte_array()));
2725 log_debug!(logger, "Claiming inbound HTLC id {} with payment hash {} with preimage {}",
htlc.htlc_id, htlc.payment_hash, payment_preimage_arg);
match htlc.state {
	InboundHTLCState::Committed => {},
	InboundHTLCState::LocalRemoved(ref reason) => {
		if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
		} else {
			log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, &self.context.channel_id());
			debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
		}
		return UpdateFulfillFetch::DuplicateClaim {};
	},
	_ => {
		debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
		// Don't return in release mode here so that we can update channel_monitor
	}
}
pending_idx = idx;
htlc_value_msat = htlc.amount_msat;
break;
}
}
2747 if pending_idx == core::usize::MAX {
2748 #[cfg(any(test, fuzzing))]
2749 // If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
2750 // this is simply a duplicate claim, not previously failed and we lost funds.
2751 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2752 return UpdateFulfillFetch::DuplicateClaim {};
2755 // Now update local state:
2757 // We have to put the payment_preimage in the channel_monitor right away here to ensure we
2758 // can claim it even if the channel hits the chain before we see their next commitment.
2759 self.context.latest_monitor_update_id += 1;
2760 let monitor_update = ChannelMonitorUpdate {
2761 update_id: self.context.latest_monitor_update_id,
2762 updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
payment_preimage: payment_preimage_arg.clone(),
}],
};
2767 if self.context.channel_state.should_force_holding_cell() {
2768 // Note that this condition is the same as the assertion in
2769 // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
2770 // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
// do not get into this branch.
2772 for pending_update in self.context.holding_cell_htlc_updates.iter() {
2773 match pending_update {
2774 &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2775 if htlc_id_arg == htlc_id {
2776 // Make sure we don't leave latest_monitor_update_id incremented here:
2777 self.context.latest_monitor_update_id -= 1;
2778 #[cfg(any(test, fuzzing))]
2779 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2780 return UpdateFulfillFetch::DuplicateClaim {};
2783 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
2784 &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
2786 if htlc_id_arg == htlc_id {
2787 log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id());
2788 // TODO: We may actually be able to switch to a fulfill here, though its
2789 // rare enough it may not be worth the complexity burden.
2790 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
2791 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2797 log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state.to_u32());
2798 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
});
2801 #[cfg(any(test, fuzzing))]
2802 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
2803 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2805 #[cfg(any(test, fuzzing))]
2806 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
2809 let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
if let InboundHTLCState::Committed = htlc.state {
} else {
	debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
	return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
}
2815 log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, &self.context.channel_id);
2816 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
UpdateFulfillFetch::NewClaim {
	monitor_update,
	htlc_value_msat,
2822 msg: Some(msgs::UpdateFulfillHTLC {
2823 channel_id: self.context.channel_id(),
2824 htlc_id: htlc_id_arg,
2825 payment_preimage: payment_preimage_arg,
2830 pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
2831 let release_cs_monitor = self.context.blocked_monitor_updates.is_empty();
2832 match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
2833 UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
2834 // Even if we aren't supposed to let new monitor updates with commitment state
2835 // updates run, we still need to push the preimage ChannelMonitorUpdateStep no
2836 // matter what. Sadly, to push a new monitor update which flies before others
2837 // already queued, we have to insert it into the pending queue and update the
2838 // update_ids of all the following monitors.
2839 if release_cs_monitor && msg.is_some() {
2840 let mut additional_update = self.build_commitment_no_status_check(logger);
2841 // build_commitment_no_status_check may bump latest_monitor_id but we want them
2842 // to be strictly increasing by one, so decrement it here.
2843 self.context.latest_monitor_update_id = monitor_update.update_id;
2844 monitor_update.updates.append(&mut additional_update.updates);
2846 let new_mon_id = self.context.blocked_monitor_updates.get(0)
2847 .map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
2848 monitor_update.update_id = new_mon_id;
2849 for held_update in self.context.blocked_monitor_updates.iter_mut() {
held_update.update.update_id += 1;
}
} else {
	debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
	let update = self.build_commitment_no_status_check(logger);
	self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
		update,
	});
}
2861 self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
2862 UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, }
2864 UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
2868 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2869 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2870 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2871 /// before we fail backwards.
2873 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2874 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2875 /// [`ChannelError::Ignore`].
2876 pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
2877 -> Result<(), ChannelError> where L::Target: Logger {
2878 self.fail_htlc(htlc_id_arg, err_packet, true, logger)
2879 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
2882 /// Used for failing back with [`msgs::UpdateFailMalformedHTLC`]. For now, this is used when we
2883 /// want to fail blinded HTLCs where we are not the intro node.
2885 /// See [`Self::queue_fail_htlc`] for more info.
2886 pub fn queue_fail_malformed_htlc<L: Deref>(
2887 &mut self, htlc_id_arg: u64, failure_code: u16, sha256_of_onion: [u8; 32], logger: &L
2888 ) -> Result<(), ChannelError> where L::Target: Logger {
2889 self.fail_htlc(htlc_id_arg, (sha256_of_onion, failure_code), true, logger)
2890 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
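// Hypothetical call sketch (values are illustrative, not from this file): when failing a
// blinded HTLC where we are not the intro node, a caller would pass the BOLT 4
// invalid_onion_blinding code (BADONION | PERM | 24):
//   channel.queue_fail_malformed_htlc(htlc_id, 0x8000 | 0x4000 | 24, sha256_of_onion, &logger)?;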
2893 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2894 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2895 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2896 /// before we fail backwards.
2898 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2899 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2900 /// [`ChannelError::Ignore`].
2901 fn fail_htlc<L: Deref, E: FailHTLCContents + Clone>(
&mut self, htlc_id_arg: u64, err_packet: E, mut force_holding_cell: bool,
logger: &L
2904 ) -> Result<Option<E::Message>, ChannelError> where L::Target: Logger {
2905 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
2906 panic!("Was asked to fail an HTLC when channel was not in an operational state");
2909 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
2910 // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
2911 // these, but for now we just have to treat them as normal.
2913 let mut pending_idx = core::usize::MAX;
2914 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
if htlc.htlc_id == htlc_id_arg {
	match htlc.state {
		InboundHTLCState::Committed => {},
		InboundHTLCState::LocalRemoved(ref reason) => {
			if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
			} else {
				debug_assert!(false, "Tried to fail an HTLC that was already failed");
			}
			return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
		},
		_ => {
			debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
			return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
		}
	}
	pending_idx = idx;
}
}
2933 if pending_idx == core::usize::MAX {
2934 #[cfg(any(test, fuzzing))]
2935 // If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
2936 // is simply a duplicate fail, not previously failed and we failed-back too early.
debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
return Ok(None);
}
2941 if self.context.channel_state.should_force_holding_cell() {
2942 debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
2943 force_holding_cell = true;
2946 // Now update local state:
2947 if force_holding_cell {
2948 for pending_update in self.context.holding_cell_htlc_updates.iter() {
2949 match pending_update {
2950 &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2951 if htlc_id_arg == htlc_id {
2952 #[cfg(any(test, fuzzing))]
debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
return Ok(None);
}
},
2957 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
2958 &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
2960 if htlc_id_arg == htlc_id {
2961 debug_assert!(false, "Tried to fail an HTLC that was already failed");
2962 return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
2968 log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id());
self.context.holding_cell_htlc_updates.push(err_packet.to_htlc_update_awaiting_ack(htlc_id_arg));
return Ok(None);
}
2973 log_trace!(logger, "Failing HTLC ID {} back with {} message in channel {}.", htlc_id_arg,
2974 E::Message::name(), &self.context.channel_id());
2976 let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
2977 htlc.state = err_packet.clone().to_inbound_htlc_state();
2980 Ok(Some(err_packet.to_message(htlc_id_arg, self.context.channel_id())))
2983 // Message handlers:
2984 /// Updates the state of the channel to indicate that all channels in the batch have received
2985 /// funding_signed and persisted their monitors.
2986 /// The funding transaction is consequently allowed to be broadcast, and the channel can be
2987 /// treated as a non-batch channel going forward.
2988 pub fn set_batch_ready(&mut self) {
2989 self.context.is_batch_funding = None;
2990 self.context.channel_state.clear_waiting_for_batch();
2993 /// Handles a channel_ready message from our peer. If we've already sent our channel_ready
/// and the channel is now usable (and public), this may generate an announcement_signatures to
/// send back.
2996 pub fn channel_ready<NS: Deref, L: Deref>(
2997 &mut self, msg: &msgs::ChannelReady, node_signer: &NS, chain_hash: ChainHash,
2998 user_config: &UserConfig, best_block: &BestBlock, logger: &L
) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
where
	NS::Target: NodeSigner,
	L::Target: Logger
{
3004 if self.context.channel_state.is_peer_disconnected() {
3005 self.context.workaround_lnd_bug_4006 = Some(msg.clone());
3006 return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
3009 if let Some(scid_alias) = msg.short_channel_id_alias {
3010 if Some(scid_alias) != self.context.short_channel_id {
3011 // The scid alias provided can be used to route payments *from* our counterparty,
3012 // i.e. can be used for inbound payments and provided in invoices, but is not used
3013 // when routing outbound payments.
3014 self.context.latest_inbound_scid_alias = Some(scid_alias);
3018 // Our channel_ready shouldn't have been sent if we are waiting for other channels in the
3019 // batch, but we can receive channel_ready messages.
3020 let mut check_reconnection = false;
3021 match &self.context.channel_state {
3022 ChannelState::AwaitingChannelReady(flags) => {
3023 let flags = *flags & !FundedStateFlags::ALL;
3024 debug_assert!(!flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY) || !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
3025 if flags & !AwaitingChannelReadyFlags::WAITING_FOR_BATCH == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY {
3026 // If we reconnected before sending our `channel_ready` they may still resend theirs.
3027 check_reconnection = true;
3028 } else if (flags & !AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty() {
3029 self.context.channel_state.set_their_channel_ready();
3030 } else if flags == AwaitingChannelReadyFlags::OUR_CHANNEL_READY {
3031 self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
self.context.update_time_counter += 1;
} else {
	// We're in `WAITING_FOR_BATCH`, so we should wait until we're ready.
	debug_assert!(flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
}
3038 // If we reconnected before sending our `channel_ready` they may still resend theirs.
3039 ChannelState::ChannelReady(_) => check_reconnection = true,
3040 _ => return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned())),
3042 if check_reconnection {
3043 // They probably disconnected/reconnected and re-sent the channel_ready, which is
3044 // required, or they're sending a fresh SCID alias.
3045 let expected_point =
3046 if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
// If they haven't ever sent an updated point, the point they send should match
// the current one.
self.context.counterparty_cur_commitment_point
3050 } else if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
3051 // If we've advanced the commitment number once, the second commitment point is
3052 // at `counterparty_prev_commitment_point`, which is not yet revoked.
3053 debug_assert!(self.context.counterparty_prev_commitment_point.is_some());
self.context.counterparty_prev_commitment_point
} else {
3056 // If they have sent updated points, channel_ready is always supposed to match
3057 // their "first" point, which we re-derive here.
3058 Some(PublicKey::from_secret_key(&self.context.secp_ctx, &SecretKey::from_slice(
3059 &self.context.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
).expect("We already advanced, so previous secret keys should have been validated already")))
};
3062 if expected_point != Some(msg.next_per_commitment_point) {
return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
}
return Ok(None);
}
3068 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
3069 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
3071 log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id());
3073 Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger))
3076 pub fn update_add_htlc<F, FE: Deref, L: Deref>(
3077 &mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
3078 create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
3079 ) -> Result<(), ChannelError>
3080 where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
3081 FE::Target: FeeEstimator, L::Target: Logger,
3083 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3084 return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
3086 // We can't accept HTLCs sent after we've sent a shutdown.
3087 if self.context.channel_state.is_local_shutdown_sent() {
3088 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
3090 // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
3091 if self.context.channel_state.is_remote_shutdown_sent() {
3092 return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
3094 if self.context.channel_state.is_peer_disconnected() {
3095 return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
3097 if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
3098 return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
3100 if msg.amount_msat == 0 {
3101 return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
3103 if msg.amount_msat < self.context.holder_htlc_minimum_msat {
3104 return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
3107 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
3108 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
3109 if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
3110 return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
3112 if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
3113 return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
3116 // Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
3117 // the reserve_satoshis we told them to always have as direct payment so that they lose
3118 // something if we punish them for broadcasting an old state).
3119 // Note that we don't really care about having a small/no to_remote output in our local
3120 // commitment transactions, as the purpose of the channel reserve is to ensure we can
3121 // punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
3122 // present in the next commitment transaction we send them (at least for fulfilled ones,
3123 // failed ones won't modify value_to_self).
3124 // Note that we will send HTLCs which another instance of rust-lightning would think
3125 // violate the reserve value if we do not do this (as we forget inbound HTLCs from the
// Channel state once they will not be present in the next received commitment
// transaction).
3128 let mut removed_outbound_total_msat = 0;
3129 for ref htlc in self.context.pending_outbound_htlcs.iter() {
3130 if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
3131 removed_outbound_total_msat += htlc.amount_msat;
3132 } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
3133 removed_outbound_total_msat += htlc.amount_msat;
3137 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
	(0, 0)
} else {
3141 let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
3142 (dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
};
3145 let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
3146 if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
3147 let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
3148 if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
3149 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
3150 on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
3151 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
3155 let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
3156 if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
3157 let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
3158 if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
3159 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
3160 on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
3161 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
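// Illustrative numbers for the two dust checks above (non-anchor): with a dust buffer
// feerate of 2_530 sat/kW and a 663-weight HTLC-timeout transaction, the timeout dust
// limit is 2_530 * 663 / 1000 = 1_677 sats; adding a 546 sat counterparty dust limit, any
// HTLC under 2_223 sats is dust on their commitment transaction and counts toward
// `max_dust_htlc_exposure_msat`.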
3165 let pending_value_to_self_msat =
3166 self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
3167 let pending_remote_value_msat =
3168 self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
3169 if pending_remote_value_msat < msg.amount_msat {
3170 return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
3173 // Check that the remote can afford to pay for this HTLC on-chain at the current
3174 // feerate_per_kw, while maintaining their channel reserve (as required by the spec).
3176 let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else {
3177 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
};
3180 let anchor_outputs_value_msat = if !self.context.is_outbound() && self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
} else {
	0
};
3185 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(anchor_outputs_value_msat) < remote_commit_tx_fee_msat {
3186 return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
3188 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(remote_commit_tx_fee_msat).saturating_sub(anchor_outputs_value_msat) < self.context.holder_selected_channel_reserve_satoshis * 1000 {
3189 return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
3193 let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
} else {
	0
};
3198 if !self.context.is_outbound() {
3199 // `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
3200 // the spec because the fee spike buffer requirement doesn't exist on the receiver's
// side, only on the sender's. Note that with anchor outputs we are no longer as
// sensitive to fee spikes, so we don't need to account for them.
3203 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3204 let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
3205 if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3206 remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
3208 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
3209 // Note that if the pending_forward_status is not updated here, then it's because we're already failing
3210 // the HTLC, i.e. its status is already set to failing.
3211 log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
3212 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
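// Sketch of the buffer math above (illustrative): on a non-anchor channel, if the next
// remote commitment transaction would cost 5_000 msat in fees including one extra buffer
// HTLC, we require the remote to retain 2 * 5_000 = 10_000 msat above their reserve
// (FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE being 2) before we accept this HTLC without
// failing it back.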
3215 // Check that they won't violate our local required channel reserve by adding this HTLC.
3216 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3217 let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
3218 if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat + anchor_outputs_value_msat {
3219 return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
3222 if self.context.next_counterparty_htlc_id != msg.htlc_id {
3223 return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
3225 if msg.cltv_expiry >= 500000000 {
3226 return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
3229 if self.context.channel_state.is_local_shutdown_sent() {
3230 if let PendingHTLCStatus::Forward(_) = pending_forward_status {
3231 panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
3235 // Now update local state:
3236 self.context.next_counterparty_htlc_id += 1;
3237 self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
3238 htlc_id: msg.htlc_id,
3239 amount_msat: msg.amount_msat,
3240 payment_hash: msg.payment_hash,
3241 cltv_expiry: msg.cltv_expiry,
state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
});

Ok(())
}
/// Marks as removed an outbound HTLC for which we have received an
/// update_fail/fulfill/malformed message from our peer.
3249 fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
3250 assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage");
3251 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3252 if htlc.htlc_id == htlc_id {
3253 let outcome = match check_preimage {
3254 None => fail_reason.into(),
3255 Some(payment_preimage) => {
3256 let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array());
3257 if payment_hash != htlc.payment_hash {
3258 return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
OutboundHTLCOutcome::Success(Some(payment_preimage))
	}
};
match htlc.state {
3264 OutboundHTLCState::LocalAnnounced(_) =>
3265 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
3266 OutboundHTLCState::Committed => {
3267 htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
3269 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
}
return Ok(htlc);
}
}
3275 Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
3278 pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
3279 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3280 return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
3282 if self.context.channel_state.is_peer_disconnected() {
3283 return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
3286 self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
3289 pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
3290 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3291 return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
3293 if self.context.channel_state.is_peer_disconnected() {
3294 return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
Ok(())
}
3301 pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
3302 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3303 return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
3305 if self.context.channel_state.is_peer_disconnected() {
3306 return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
Ok(())
}
3313 pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
3314 where L::Target: Logger
3316 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3317 return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
3319 if self.context.channel_state.is_peer_disconnected() {
3320 return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
3322 if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
3323 return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
3326 let funding_script = self.context.get_funding_redeemscript();
3328 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
3330 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
3331 let commitment_txid = {
3332 let trusted_tx = commitment_stats.tx.trust();
3333 let bitcoin_tx = trusted_tx.built_transaction();
3334 let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
3336 log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
3337 log_bytes!(msg.signature.serialize_compact()[..]),
3338 log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
3339 log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), &self.context.channel_id());
3340 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
3341 return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
3345 let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();
3347 // If our counterparty updated the channel fee in this commitment transaction, check that
3348 // they can actually afford the new fee now.
let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee {
	update_state == FeeUpdateState::RemoteAnnounced
} else { false };
if update_fee {
	debug_assert!(!self.context.is_outbound());
3354 let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
3355 if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
3356 return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
3359 #[cfg(any(test, fuzzing))]
3361 if self.context.is_outbound() {
3362 let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
3363 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
3364 if let Some(info) = projected_commit_tx_info {
3365 let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len()
3366 + self.context.holding_cell_htlc_updates.len();
3367 if info.total_pending_htlcs == total_pending_htlcs
3368 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
3369 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
3370 && info.feerate == self.context.feerate_per_kw {
3371 assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);
3377 if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
3378 return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
3381 // Up to LDK 0.0.115, HTLC information was required to be duplicated in the
3382 // `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
3383 // in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
3384 // outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for
3385 // backwards compatibility, we never use it in production. To provide test coverage, here,
3386 // we randomly decide (in test/fuzzing builds) to use the new vec sometimes.
3387 #[allow(unused_assignments, unused_mut)]
3388 let mut separate_nondust_htlc_sources = false;
3389 #[cfg(all(feature = "std", any(test, fuzzing)))] {
3390 use core::hash::{BuildHasher, Hasher};
3391 // Get a random value using the only std API to do so - the DefaultHasher
3392 let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
3393 separate_nondust_htlc_sources = rand_val % 2 == 0;
3396 let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
3397 let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
3398 for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
3399 if let Some(_) = htlc.transaction_output_index {
3400 let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
3401 self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, &self.context.channel_type,
3402 &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
3404 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &keys);
3405 let htlc_sighashtype = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
3406 let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
3407 log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
3408 log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.to_public_key().serialize()),
3409 encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), &self.context.channel_id());
3410 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key.to_public_key()) {
3411 return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
3413 if !separate_nondust_htlc_sources {
htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
}
} else {
	htlcs_and_sigs.push((htlc, None, source_opt.take()));
}
3419 if separate_nondust_htlc_sources {
3420 if let Some(source) = source_opt.take() {
3421 nondust_htlc_sources.push(source);
3424 debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
3427 let holder_commitment_tx = HolderCommitmentTransaction::new(
commitment_stats.tx,
msg.signature,
3430 msg.htlc_signatures.clone(),
3431 &self.context.get_holder_pubkeys().funding_pubkey,
self.context.counterparty_funding_pubkey()
);
3435 self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.outbound_htlc_preimages)
3436 .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
3438 // Update state now that we've passed all the can-fail calls...
3439 let mut need_commitment = false;
3440 if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee {
3441 if *update_state == FeeUpdateState::RemoteAnnounced {
3442 *update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
3443 need_commitment = true;
3447 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
	Some(forward_info.clone())
} else { None };
3451 if let Some(forward_info) = new_forward {
3452 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
3453 &htlc.payment_hash, &self.context.channel_id);
3454 htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
3455 need_commitment = true;
3458 let mut claimed_htlcs = Vec::new();
3459 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3460 if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
3461 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
3462 &htlc.payment_hash, &self.context.channel_id);
3463 // Grab the preimage, if it exists, instead of cloning
3464 let mut reason = OutboundHTLCOutcome::Success(None);
3465 mem::swap(outcome, &mut reason);
3466 if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
3467 // If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
3468 // upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
3469 // have a `Success(None)` reason. In this case we could forget some HTLC
3470 // claims, but such an upgrade is unlikely and including claimed HTLCs here
// fixes a bug which the user was exposed to on 0.0.104 when they started the
// claim anyway.
3473 claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
3475 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
3476 need_commitment = true;
3480 self.context.latest_monitor_update_id += 1;
3481 let mut monitor_update = ChannelMonitorUpdate {
3482 update_id: self.context.latest_monitor_update_id,
3483 updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
3484 commitment_tx: holder_commitment_tx,
htlc_outputs: htlcs_and_sigs,
claimed_htlcs,
nondust_htlc_sources,
}],
};
3491 self.context.cur_holder_commitment_transaction_number -= 1;
3492 self.context.expecting_peer_commitment_signed = false;
3493 // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
3494 // build_commitment_no_status_check() next which will reset this to RAAFirst.
3495 self.context.resend_order = RAACommitmentOrder::CommitmentFirst;
3497 if self.context.channel_state.is_monitor_update_in_progress() {
3498 // In case we initially failed monitor updating without requiring a response, we need
3499 // to make sure the RAA gets sent first.
3500 self.context.monitor_pending_revoke_and_ack = true;
3501 if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
3502 // If we were going to send a commitment_signed after the RAA, go ahead and do all
3503 // the corresponding HTLC status updates so that
3504 // get_last_commitment_update_for_send includes the right HTLCs.
3505 self.context.monitor_pending_commitment_signed = true;
3506 let mut additional_update = self.build_commitment_no_status_check(logger);
3507 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3508 // strictly increasing by one, so decrement it here.
3509 self.context.latest_monitor_update_id = monitor_update.update_id;
3510 monitor_update.updates.append(&mut additional_update.updates);
3512 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
3513 &self.context.channel_id);
3514 return Ok(self.push_ret_blockable_mon_update(monitor_update));
3517 let need_commitment_signed = if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
3518 // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
3519 // we'll send one right away when we get the revoke_and_ack when we
3520 // free_holding_cell_htlcs().
3521 let mut additional_update = self.build_commitment_no_status_check(logger);
3522 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3523 // strictly increasing by one, so decrement it here.
3524 self.context.latest_monitor_update_id = monitor_update.update_id;
monitor_update.updates.append(&mut additional_update.updates);
true
} else { false };
3529 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
3530 &self.context.channel_id(), if need_commitment_signed { " our own commitment_signed and" } else { "" });
3531 self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
3532 return Ok(self.push_ret_blockable_mon_update(monitor_update));
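// For orientation: the happy path above is the standard BOLT 2 exchange -- the peer sends
// commitment_signed, we validate and persist via a monitor update, then respond with
// revoke_and_ack, optionally followed by our own commitment_signed (in the order tracked
// by `resend_order`).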
3535 /// Public version of the below, checking relevant preconditions first.
3536 /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
3537 /// returns `(None, Vec::new())`.
3538 pub fn maybe_free_holding_cell_htlcs<F: Deref, L: Deref>(
3539 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3540 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3541 where F::Target: FeeEstimator, L::Target: Logger
3543 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && !self.context.channel_state.should_force_holding_cell() {
3544 self.free_holding_cell_htlcs(fee_estimator, logger)
3545 } else { (None, Vec::new()) }
3548 /// Frees any pending commitment updates in the holding cell, generating the relevant messages
3549 /// for our counterparty.
3550 fn free_holding_cell_htlcs<F: Deref, L: Deref>(
3551 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3552 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3553 where F::Target: FeeEstimator, L::Target: Logger
3555 assert!(!self.context.channel_state.is_monitor_update_in_progress());
3556 if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
3557 log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
3558 if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id());
3560 let mut monitor_update = ChannelMonitorUpdate {
3561 update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
updates: Vec::new(),
};
3565 let mut htlc_updates = Vec::new();
3566 mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
3567 let mut update_add_count = 0;
3568 let mut update_fulfill_count = 0;
3569 let mut update_fail_count = 0;
3570 let mut htlcs_to_fail = Vec::new();
3571 for htlc_update in htlc_updates.drain(..) {
3572 // Note that this *can* fail, though it should be due to rather-rare conditions on
3573 // fee races with adding too many outputs which push our total payments just over
3574 // the limit. In case it's less rare than I anticipate, we may want to revisit
3575 // handling this case better and maybe fulfilling some of the HTLCs while attempting
3576 // to rebalance channels.
3577 let fail_htlc_res = match &htlc_update {
3578 &HTLCUpdateAwaitingACK::AddHTLC {
3579 amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
skimmed_fee_msat, blinding_point, ..
} => {
	match self.send_htlc(
		amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(),
		false, skimmed_fee_msat, blinding_point, fee_estimator, logger
	) {
		Ok(_) => update_add_count += 1,
		Err(e) => {
			match e {
3589 ChannelError::Ignore(ref msg) => {
3590 log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", &payment_hash, msg, &self.context.channel_id());
3591 // If we fail to send here, then this HTLC should
3592 // be failed backwards. Failing to send here
3593 // indicates that this HTLC may keep being put back
3594 // into the holding cell without ever being
3595 // successfully forwarded/failed/fulfilled, causing
3596 // our counterparty to eventually close on us.
htlcs_to_fail.push((source.clone(), *payment_hash));
},
_ => {
	panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
}
}
}
}
None
},
3607 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
// If an HTLC claim was previously added to the holding cell (via
// `get_update_fulfill_htlc`), then generating the claim message itself must
3610 // not fail - any in between attempts to claim the HTLC will have resulted
3611 // in it hitting the holding cell again and we cannot change the state of a
3612 // holding cell HTLC from fulfill to anything else.
3613 let mut additional_monitor_update =
3614 if let UpdateFulfillFetch::NewClaim { monitor_update, .. } =
3615 self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger)
3616 { monitor_update } else { unreachable!() };
3617 update_fulfill_count += 1;
monitor_update.updates.append(&mut additional_monitor_update.updates);
None
},
3621 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
3622 Some(self.fail_htlc(htlc_id, err_packet.clone(), false, logger)
3623 .map(|fail_msg_opt| fail_msg_opt.map(|_| ())))
3625 &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
3626 Some(self.fail_htlc(htlc_id, (sha256_of_onion, failure_code), false, logger)
3627 .map(|fail_msg_opt| fail_msg_opt.map(|_| ())))
if let Some(res) = fail_htlc_res {
	match res {
3632 Ok(fail_msg_opt) => {
3633 // If an HTLC failure was previously added to the holding cell (via
3634 // `queue_fail_{malformed_}htlc`) then generating the fail message itself must
3635 // not fail - we should never end up in a state where we double-fail
3636 // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
3637 // for a full revocation before failing.
3638 debug_assert!(fail_msg_opt.is_some());
3639 update_fail_count += 1;
Err(ChannelError::Ignore(_)) => {},
Err(_) => {
	panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
},
}
}
}
3648 if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && self.context.holding_cell_update_fee.is_none() {
3649 return (None, htlcs_to_fail);
3651 let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
self.send_update_fee(feerate, false, fee_estimator, logger)
} else {
	None
};
3657 let mut additional_update = self.build_commitment_no_status_check(logger);
3658 // build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_id
3659 // but we want them to be strictly increasing by one, so reset it here.
3660 self.context.latest_monitor_update_id = monitor_update.update_id;
3661 monitor_update.updates.append(&mut additional_update.updates);
3663 log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
3664 &self.context.channel_id(), if update_fee.is_some() { "a fee update, " } else { "" },
3665 update_add_count, update_fulfill_count, update_fail_count);
3667 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
3668 (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
3674 /// Handles receiving a remote's revoke_and_ack. Note that we may return a new
3675 /// commitment_signed message here in case we had pending outbound HTLCs to add which were
3676 /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
3677 /// generating an appropriate error *after* the channel state has been updated based on the
3678 /// revoke_and_ack message.
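/// A minimal caller-side sketch (the surrounding names, e.g. `chan`, are assumptions for
/// illustration, not part of this API's contract):
/// ```ignore
/// let (htlcs_to_fail, monitor_update_opt) =
///     chan.revoke_and_ack(&msg, &fee_estimator, &logger, false)?;
/// // HTLCs returned here could not be forwarded and must be failed backwards.
/// for (source, payment_hash) in htlcs_to_fail { /* fail back toward `source` */ }
/// if let Some(monitor_update) = monitor_update_opt { /* persist before proceeding */ }
/// ```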
3679 pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK,
3680 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L, hold_mon_update: bool,
3681 ) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
3682 where F::Target: FeeEstimator, L::Target: Logger,
3684 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3685 return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
3687 if self.context.channel_state.is_peer_disconnected() {
3688 return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
3690 if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
3691 return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
3694 let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());
3696 if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
3697 if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
3698 return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
3702 if !self.context.channel_state.is_awaiting_remote_revoke() {
3703 // Our counterparty seems to have burned their coins to us (by revoking a state when we
3704 // haven't given them a new commitment transaction to broadcast). We should probably
3705 // take advantage of this by updating our channel monitor, sending them an error, and
3706 // waiting for them to broadcast their latest (now-revoked) claim. But, that would be a
3707 // lot of work, and there's some chance this is all a misunderstanding anyway.
3708 // We have to do *something*, though, since our signer may get mad at us for otherwise
3709 // jumping a remote commitment number, so best to just force-close and move on.
3710 return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
3713 #[cfg(any(test, fuzzing))]
3715 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
3716 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
3719 match &self.context.holder_signer {
3720 ChannelSignerType::Ecdsa(ecdsa) => {
3721 ecdsa.validate_counterparty_revocation(
3722 self.context.cur_counterparty_commitment_transaction_number + 1,
3723 &secret
3724 ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
3726 // TODO (taproot|arik)
3731 self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
3732 .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
3733 self.context.latest_monitor_update_id += 1;
3734 let mut monitor_update = ChannelMonitorUpdate {
3735 update_id: self.context.latest_monitor_update_id,
3736 updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
3737 idx: self.context.cur_counterparty_commitment_transaction_number + 1,
3738 secret: msg.per_commitment_secret,
3742 // Update state now that we've passed all the can-fail calls...
3743 // (note that we may still fail to generate the new commitment_signed message, but that's
3744 // OK, we step the channel here and *then* if the new generation fails we can fail the
3745 // channel based on that, but stepping stuff here should be safe either way.)
3746 self.context.channel_state.clear_awaiting_remote_revoke();
3747 self.context.sent_message_awaiting_response = None;
3748 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
3749 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
3750 self.context.cur_counterparty_commitment_transaction_number -= 1;
3752 if self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
3753 self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
3756 log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id());
3757 let mut to_forward_infos = Vec::new();
3758 let mut revoked_htlcs = Vec::new();
3759 let mut finalized_claimed_htlcs = Vec::new();
3760 let mut update_fail_htlcs = Vec::new();
3761 let mut update_fail_malformed_htlcs = Vec::new();
3762 let mut require_commitment = false;
3763 let mut value_to_self_msat_diff: i64 = 0;
3766 // Take references explicitly so that we can hold multiple references to self.context.
3767 let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
3768 let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;
3769 let expecting_peer_commitment_signed = &mut self.context.expecting_peer_commitment_signed;
3771 // We really shouldn't have two passes here, but retain gives a non-mutable ref (Rust bug)
3772 pending_inbound_htlcs.retain(|htlc| {
3773 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
3774 log_trace!(logger, " ...removing inbound LocalRemoved {}", &htlc.payment_hash);
3775 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
3776 value_to_self_msat_diff += htlc.amount_msat as i64;
3777 }
3778 *expecting_peer_commitment_signed = true;
3779 false
3780 } else { true }
3781 });
3782 pending_outbound_htlcs.retain(|htlc| {
3783 if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
3784 log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", &htlc.payment_hash);
3785 if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
3786 revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
3787 } else {
3788 finalized_claimed_htlcs.push(htlc.source.clone());
3789 // They fulfilled, so we sent them money
3790 value_to_self_msat_diff -= htlc.amount_msat as i64;
3791 }
3792 false
3793 } else { true }
3794 });
3795 for htlc in pending_inbound_htlcs.iter_mut() {
3796 let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
3797 true
3798 } else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
3799 true
3800 } else { false };
3801 if swap {
3802 let mut state = InboundHTLCState::Committed;
3803 mem::swap(&mut state, &mut htlc.state);
3805 if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
3806 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
3807 htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
3808 require_commitment = true;
3809 } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
3810 match forward_info {
3811 PendingHTLCStatus::Fail(fail_msg) => {
3812 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash);
3813 require_commitment = true;
3814 match fail_msg {
3815 HTLCFailureMsg::Relay(msg) => {
3816 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
3817 update_fail_htlcs.push(msg)
3819 HTLCFailureMsg::Malformed(msg) => {
3820 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
3821 update_fail_malformed_htlcs.push(msg)
3825 PendingHTLCStatus::Forward(forward_info) => {
3826 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash);
3827 to_forward_infos.push((forward_info, htlc.htlc_id));
3828 htlc.state = InboundHTLCState::Committed;
3834 for htlc in pending_outbound_htlcs.iter_mut() {
3835 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
3836 log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", &htlc.payment_hash);
3837 htlc.state = OutboundHTLCState::Committed;
3838 *expecting_peer_commitment_signed = true;
3840 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
3841 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
3842 // Grab the preimage, if it exists, instead of cloning
3843 let mut reason = OutboundHTLCOutcome::Success(None);
3844 mem::swap(outcome, &mut reason);
3845 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
3846 require_commitment = true;
3850 self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;
3852 if let Some((feerate, update_state)) = self.context.pending_update_fee {
3853 match update_state {
3854 FeeUpdateState::Outbound => {
3855 debug_assert!(self.context.is_outbound());
3856 log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
3857 self.context.feerate_per_kw = feerate;
3858 self.context.pending_update_fee = None;
3859 self.context.expecting_peer_commitment_signed = true;
3861 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); },
3862 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
3863 debug_assert!(!self.context.is_outbound());
3864 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
3865 require_commitment = true;
3866 self.context.feerate_per_kw = feerate;
3867 self.context.pending_update_fee = None;
3872 let release_monitor = self.context.blocked_monitor_updates.is_empty() && !hold_mon_update;
3873 let release_state_str =
3874 if hold_mon_update { "Holding" } else if release_monitor { "Releasing" } else { "Blocked" };
3875 macro_rules! return_with_htlcs_to_fail {
3876 ($htlcs_to_fail: expr) => {
3877 if !release_monitor {
3878 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
3879 update: monitor_update,
3880 });
3881 return Ok(($htlcs_to_fail, None));
3882 } else {
3883 return Ok(($htlcs_to_fail, Some(monitor_update)));
3884 }
3885 }
3886 }
3888 if self.context.channel_state.is_monitor_update_in_progress() {
3889 // We can't actually generate a new commitment transaction (incl by freeing holding
3890 // cells) while we can't update the monitor, so we just return what we have.
3891 if require_commitment {
3892 self.context.monitor_pending_commitment_signed = true;
3893 // When the monitor updating is restored we'll call
3894 // get_last_commitment_update_for_send(), which does not update state, but we're
3895 // definitely now awaiting a remote revoke before we can step forward any more, so
3896 // set it here.
3897 let mut additional_update = self.build_commitment_no_status_check(logger);
3898 // build_commitment_no_status_check may bump latest_monitor_update_id but we want them to be
3899 // strictly increasing by one, so decrement it here.
3900 self.context.latest_monitor_update_id = monitor_update.update_id;
3901 monitor_update.updates.append(&mut additional_update.updates);
3903 self.context.monitor_pending_forwards.append(&mut to_forward_infos);
3904 self.context.monitor_pending_failures.append(&mut revoked_htlcs);
3905 self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
3906 log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", &self.context.channel_id());
3907 return_with_htlcs_to_fail!(Vec::new());
3910 match self.free_holding_cell_htlcs(fee_estimator, logger) {
3911 (Some(mut additional_update), htlcs_to_fail) => {
3912 // free_holding_cell_htlcs may bump latest_monitor_update_id multiple times but we want them to be
3913 // strictly increasing by one, so decrement it here.
3914 self.context.latest_monitor_update_id = monitor_update.update_id;
3915 monitor_update.updates.append(&mut additional_update.updates);
3917 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with holding cell HTLCs freed. {} monitor update.",
3918 &self.context.channel_id(), release_state_str);
3920 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3921 return_with_htlcs_to_fail!(htlcs_to_fail);
3923 (None, htlcs_to_fail) => {
3924 if require_commitment {
3925 let mut additional_update = self.build_commitment_no_status_check(logger);
3927 // build_commitment_no_status_check may bump latest_monitor_update_id but we want them to be
3928 // strictly increasing by one, so decrement it here.
3929 self.context.latest_monitor_update_id = monitor_update.update_id;
3930 monitor_update.updates.append(&mut additional_update.updates);
3932 log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed. {} monitor update.",
3933 &self.context.channel_id(),
3934 update_fail_htlcs.len() + update_fail_malformed_htlcs.len(),
3935 release_state_str);
3937 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3938 return_with_htlcs_to_fail!(htlcs_to_fail);
3939 } else {
3940 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary. {} monitor update.",
3941 &self.context.channel_id(), release_state_str);
3943 self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3944 return_with_htlcs_to_fail!(htlcs_to_fail);
3950 /// Queues up an outbound update fee by placing it in the holding cell. You should call
3951 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
3952 /// commitment update.
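/// A usage sketch (the caller context is an assumption):
/// ```ignore
/// chan.queue_update_fee(new_feerate_per_kw, &fee_estimator, &logger);
/// // Later, once pending RAA/monitor work clears, maybe_free_holding_cell_htlcs()
/// // emits the update_fee followed by a commitment_signed.
/// ```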
3953 pub fn queue_update_fee<F: Deref, L: Deref>(&mut self, feerate_per_kw: u32,
3954 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
3955 where F::Target: FeeEstimator, L::Target: Logger
3957 let msg_opt = self.send_update_fee(feerate_per_kw, true, fee_estimator, logger);
3958 assert!(msg_opt.is_none(), "We forced holding cell?");
3961 /// Adds a pending update to this channel. See the doc for send_htlc for
3962 /// further details on the optionness of the return value.
3963 /// If our balance is too low to cover the cost of the next commitment transaction at the
3964 /// new feerate, the update is cancelled.
3966 /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
3967 /// [`Channel`] if `force_holding_cell` is false.
3968 fn send_update_fee<F: Deref, L: Deref>(
3969 &mut self, feerate_per_kw: u32, mut force_holding_cell: bool,
3970 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3971 ) -> Option<msgs::UpdateFee>
3972 where F::Target: FeeEstimator, L::Target: Logger
3974 if !self.context.is_outbound() {
3975 panic!("Cannot send fee from inbound channel");
3977 if !self.context.is_usable() {
3978 panic!("Cannot update fee until channel is fully established and we haven't started shutting down");
3980 if !self.context.is_live() {
3981 panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
3984 // Before proposing a feerate update, check that we can actually afford the new fee.
3985 let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
3986 let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
3987 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
3988 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
3989 let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000;
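// For intuition (illustrative numbers only): with the BOLT 3 non-anchor weights of 724 WU
// per commitment transaction plus 172 WU per non-dust HTLC, at 2_500 sat/kW with 2 such
// HTLCs the fee is 2_500 * (724 + 2 * 172) / 1_000 = 2_670 sats, i.e. 2_670_000 msat after
// the * 1000 above.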
3990 let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
3991 if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
3992 //TODO: auto-close after a number of failures?
3993 log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
3997 // Note, we evaluate pending htlc "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
3998 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
3999 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
4000 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
4001 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
4002 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
4005 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
4006 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
4010 if self.context.channel_state.is_awaiting_remote_revoke() || self.context.channel_state.is_monitor_update_in_progress() {
4011 force_holding_cell = true;
4014 if force_holding_cell {
4015 self.context.holding_cell_update_fee = Some(feerate_per_kw);
4019 debug_assert!(self.context.pending_update_fee.is_none());
4020 self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));
4022 Some(msgs::UpdateFee {
4023 channel_id: self.context.channel_id,
4028 /// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
4029 /// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be
4030 /// resent.
4031 /// No further message handling calls may be made until a channel_reestablish dance has
4032 /// completed.
4033 /// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately.
4034 pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger {
4035 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
4036 if self.context.channel_state.is_pre_funded_state() {
4037 return Err(());
4038 }
4040 if self.context.channel_state.is_peer_disconnected() {
4041 // While the below code should be idempotent, it's simpler to just return early, as
4042 // redundant disconnect events can fire, though they should be rare.
4043 return Ok(());
4044 }
4046 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
4047 self.context.announcement_sigs_state = AnnouncementSigsState::NotSent;
4050 // Upon reconnect we have to start the closing_signed dance over, but shutdown messages
4051 // will be retransmitted.
4052 self.context.last_sent_closing_fee = None;
4053 self.context.pending_counterparty_closing_signed = None;
4054 self.context.closing_fee_limits = None;
4056 let mut inbound_drop_count = 0;
4057 self.context.pending_inbound_htlcs.retain(|htlc| {
4058 match htlc.state {
4059 InboundHTLCState::RemoteAnnounced(_) => {
4060 // They sent us an update_add_htlc but we never got the commitment_signed.
4061 // We'll tell them what commitment_signed we're expecting next and they'll drop
4062 // this HTLC accordingly
4063 inbound_drop_count += 1;
4064 false
4065 },
4066 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
4067 // We received a commitment_signed updating this HTLC and (at least hopefully)
4068 // sent a revoke_and_ack (which we can re-transmit) and have heard nothing
4069 // in response to it yet, so don't touch it.
4070 true
4071 },
4072 InboundHTLCState::Committed => true,
4073 InboundHTLCState::LocalRemoved(_) => {
4074 // We (hopefully) sent a commitment_signed updating this HTLC (which we can
4075 // re-transmit if needed) and they may have even sent a revoke_and_ack back
4076 // (that we missed). Keep this around for now and if they tell us they missed
4077 // the commitment_signed we can re-transmit the update then.
4078 true
4079 },
4080 }
4081 });
4082 self.context.next_counterparty_htlc_id -= inbound_drop_count;
4084 if let Some((_, update_state)) = self.context.pending_update_fee {
4085 if update_state == FeeUpdateState::RemoteAnnounced {
4086 debug_assert!(!self.context.is_outbound());
4087 self.context.pending_update_fee = None;
4091 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
4092 if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
4093 // They sent us an update to remove this but haven't yet sent the corresponding
4094 // commitment_signed, we need to move it back to Committed and they can re-send
4095 // the update upon reconnection.
4096 htlc.state = OutboundHTLCState::Committed;
4100 self.context.sent_message_awaiting_response = None;
4102 self.context.channel_state.set_peer_disconnected();
4103 log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id());
4104 Ok(())
4105 }
4107 /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
4108 /// This must be called before we return the [`ChannelMonitorUpdate`] back to the
4109 /// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
4110 /// update completes (potentially immediately).
4111 /// The messages which were generated with the monitor update must *not* have been sent to the
4112 /// remote end, and must instead have been dropped. They will be regenerated when
4113 /// [`Self::monitor_updating_restored`] is called.
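///
/// A rough lifecycle sketch, using calls that appear elsewhere in this file:
/// ```ignore
/// self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
/// let update_opt = self.push_ret_blockable_mon_update(monitor_update);
/// // ... once the returned update is persisted, the ChannelManager calls
/// // monitor_updating_restored() and sends the regenerated messages.
/// ```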
4115 /// [`ChannelManager`]: super::channelmanager::ChannelManager
4116 /// [`chain::Watch`]: crate::chain::Watch
4117 /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
4118 fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
4119 resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
4120 mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
4121 mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
4122 ) {
4123 self.context.monitor_pending_revoke_and_ack |= resend_raa;
4124 self.context.monitor_pending_commitment_signed |= resend_commitment;
4125 self.context.monitor_pending_channel_ready |= resend_channel_ready;
4126 self.context.monitor_pending_forwards.append(&mut pending_forwards);
4127 self.context.monitor_pending_failures.append(&mut pending_fails);
4128 self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
4129 self.context.channel_state.set_monitor_update_in_progress();
4132 /// Indicates that the latest ChannelMonitor update has been committed by the client
4133 /// successfully and we should restore normal operation. Returns messages which should be sent
4134 /// to the remote side.
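/// A consumer-side sketch (hypothetical caller; the field names come from the
/// `MonitorRestoreUpdates` construction below):
/// ```ignore
/// let updates = chan.monitor_updating_restored(&logger, &node_signer, chain_hash, &config, height);
/// match updates.order {
///     RAACommitmentOrder::RevokeAndACKFirst => { /* send updates.raa, then updates.commitment_update */ },
///     RAACommitmentOrder::CommitmentFirst => { /* send updates.commitment_update, then updates.raa */ },
/// }
/// ```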
4135 pub fn monitor_updating_restored<L: Deref, NS: Deref>(
4136 &mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash,
4137 user_config: &UserConfig, best_block_height: u32
4138 ) -> MonitorRestoreUpdates
4139 where
4140 L::Target: Logger,
4141 NS::Target: NodeSigner
4142 {
4143 assert!(self.context.channel_state.is_monitor_update_in_progress());
4144 self.context.channel_state.clear_monitor_update_in_progress();
4146 // If we're past (or at) the AwaitingChannelReady stage on an outbound channel, try to
4147 // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
4148 // first received the funding_signed.
4149 let mut funding_broadcastable =
4150 if self.context.is_outbound() &&
4151 matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH)) ||
4152 matches!(self.context.channel_state, ChannelState::ChannelReady(_))
4153 {
4154 self.context.funding_transaction.take()
4155 } else { None };
4156 // That said, if the funding transaction is already confirmed (ie we're active with a
4157 // minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
4158 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && self.context.minimum_depth != Some(0) {
4159 funding_broadcastable = None;
4162 // We will never broadcast the funding transaction when we're in MonitorUpdateInProgress
4163 // (and we assume the user never directly broadcasts the funding transaction and waits for
4164 // us to do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
4165 // * an inbound channel that failed to persist the monitor on funding_created and we got
4166 // the funding transaction confirmed before the monitor was persisted, or
4167 // * a 0-conf channel and intended to send the channel_ready before any broadcast at all.
4168 let channel_ready = if self.context.monitor_pending_channel_ready {
4169 assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
4170 "Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
4171 self.context.monitor_pending_channel_ready = false;
4172 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4173 Some(msgs::ChannelReady {
4174 channel_id: self.context.channel_id(),
4175 next_per_commitment_point,
4176 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4177 })
4178 } else { None };
4180 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger);
4182 let mut accepted_htlcs = Vec::new();
4183 mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards);
4184 let mut failed_htlcs = Vec::new();
4185 mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
4186 let mut finalized_claimed_htlcs = Vec::new();
4187 mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);
4189 if self.context.channel_state.is_peer_disconnected() {
4190 self.context.monitor_pending_revoke_and_ack = false;
4191 self.context.monitor_pending_commitment_signed = false;
4192 return MonitorRestoreUpdates {
4193 raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
4194 accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
4198 let raa = if self.context.monitor_pending_revoke_and_ack {
4199 Some(self.get_last_revoke_and_ack())
4200 } else { None };
4201 let commitment_update = if self.context.monitor_pending_commitment_signed {
4202 self.get_last_commitment_update_for_send(logger).ok()
4203 } else { None };
4204 if commitment_update.is_some() {
4205 self.mark_awaiting_response();
4208 self.context.monitor_pending_revoke_and_ack = false;
4209 self.context.monitor_pending_commitment_signed = false;
4210 let order = self.context.resend_order.clone();
4211 log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
4212 &self.context.channel_id(), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
4213 if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
4214 match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
4215 MonitorRestoreUpdates {
4216 raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
4220 pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
4221 where F::Target: FeeEstimator, L::Target: Logger
4223 if self.context.is_outbound() {
4224 return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
4226 if self.context.channel_state.is_peer_disconnected() {
4227 return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
4229 Channel::<SP>::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
4231 self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
4232 self.context.update_time_counter += 1;
4233 // Check that we won't be pushed over our dust exposure limit by the feerate increase.
4234 if !self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
4235 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
4236 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
4237 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
4238 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
4239 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
4240 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
4241 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
4242 msg.feerate_per_kw, holder_tx_dust_exposure)));
4244 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
4245 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
4246 msg.feerate_per_kw, counterparty_tx_dust_exposure)));
4247 }
4248 }
4249 Ok(())
4250 }
4252 /// Indicates that the signer may have some signatures for us, so we should retry if we're
4253 /// blocked.
4255 pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger {
4256 let commitment_update = if self.context.signer_pending_commitment_update {
4257 self.get_last_commitment_update_for_send(logger).ok()
4258 } else { None };
4259 let funding_signed = if self.context.signer_pending_funding && !self.context.is_outbound() {
4260 self.context.get_funding_signed_msg(logger).1
4261 } else { None };
4262 let channel_ready = if funding_signed.is_some() {
4263 self.check_get_channel_ready(0)
4264 } else { None };
4266 log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed and {} channel_ready",
4267 if commitment_update.is_some() { "a" } else { "no" },
4268 if funding_signed.is_some() { "a" } else { "no" },
4269 if channel_ready.is_some() { "a" } else { "no" });
4271 SignerResumeUpdates {
4272 commitment_update,
4273 funding_signed,
4274 channel_ready,
4275 }
4276 }
4278 fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
4279 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4280 let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
4281 msgs::RevokeAndACK {
4282 channel_id: self.context.channel_id,
4283 per_commitment_secret,
4284 next_per_commitment_point,
4285 #[cfg(taproot)]
4286 next_local_nonce: None,
4290 /// Gets the last commitment update for immediate sending to our peer.
4291 fn get_last_commitment_update_for_send<L: Deref>(&mut self, logger: &L) -> Result<msgs::CommitmentUpdate, ()> where L::Target: Logger {
4292 let mut update_add_htlcs = Vec::new();
4293 let mut update_fulfill_htlcs = Vec::new();
4294 let mut update_fail_htlcs = Vec::new();
4295 let mut update_fail_malformed_htlcs = Vec::new();
4297 for htlc in self.context.pending_outbound_htlcs.iter() {
4298 if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
4299 update_add_htlcs.push(msgs::UpdateAddHTLC {
4300 channel_id: self.context.channel_id(),
4301 htlc_id: htlc.htlc_id,
4302 amount_msat: htlc.amount_msat,
4303 payment_hash: htlc.payment_hash,
4304 cltv_expiry: htlc.cltv_expiry,
4305 onion_routing_packet: (**onion_packet).clone(),
4306 skimmed_fee_msat: htlc.skimmed_fee_msat,
4307 blinding_point: htlc.blinding_point,
4312 for htlc in self.context.pending_inbound_htlcs.iter() {
4313 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
4314 match reason {
4315 &InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
4316 update_fail_htlcs.push(msgs::UpdateFailHTLC {
4317 channel_id: self.context.channel_id(),
4318 htlc_id: htlc.htlc_id,
4319 reason: err_packet.clone()
4322 &InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
4323 update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
4324 channel_id: self.context.channel_id(),
4325 htlc_id: htlc.htlc_id,
4326 sha256_of_onion: sha256_of_onion.clone(),
4327 failure_code: failure_code.clone(),
4330 &InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
4331 update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
4332 channel_id: self.context.channel_id(),
4333 htlc_id: htlc.htlc_id,
4334 payment_preimage: payment_preimage.clone(),
4341 let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() {
4342 Some(msgs::UpdateFee {
4343 channel_id: self.context.channel_id(),
4344 feerate_per_kw: self.context.pending_update_fee.unwrap().0,
4345 })
4346 } else { None };
4348 log_trace!(logger, "Regenerating latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
4349 &self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" },
4350 update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
4351 let commitment_signed = if let Ok(update) = self.send_commitment_no_state_update(logger).map(|(cu, _)| cu) {
4352 if self.context.signer_pending_commitment_update {
4353 log_trace!(logger, "Commitment update generated: clearing signer_pending_commitment_update");
4354 self.context.signer_pending_commitment_update = false;
4355 }
4356 update
4357 } else {
4358 if !self.context.signer_pending_commitment_update {
4359 log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
4360 self.context.signer_pending_commitment_update = true;
4361 }
4362 return Err(());
4363 };
4364 Ok(msgs::CommitmentUpdate {
4365 update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
4366 commitment_signed,
4367 })
4368 }
4370 /// Gets the `Shutdown` message we should send our peer on reconnect, if any.
4371 pub fn get_outbound_shutdown(&self) -> Option<msgs::Shutdown> {
4372 if self.context.channel_state.is_local_shutdown_sent() {
4373 assert!(self.context.shutdown_scriptpubkey.is_some());
4374 Some(msgs::Shutdown {
4375 channel_id: self.context.channel_id,
4376 scriptpubkey: self.get_closing_scriptpubkey(),
4377 })
4378 } else { None }
4379 }
4381 /// May panic if some calls other than message-handling calls (which will all Err immediately)
4382 /// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
4384 /// Some links printed in log lines are included here to check them during build (when run with
4385 /// `cargo doc --document-private-items`):
4386 /// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
4387 /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
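///
/// In rough terms, the resend decisions below reduce to (a summary of this function's
/// checks, not a normative statement of BOLT 2):
/// ```text
/// next_remote_commitment_number == our_commitment_transaction      => nothing to re-send
/// next_remote_commitment_number + 1 == our_commitment_transaction  => re-send our last revoke_and_ack
/// next_local_commitment_number == next_counterparty_commitment_number      => no commitment update lost
/// next_local_commitment_number == next_counterparty_commitment_number - 1  => re-send our last commitment_signed
/// ```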
4388 pub fn channel_reestablish<L: Deref, NS: Deref>(
4389 &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
4390 chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock
4391 ) -> Result<ReestablishResponses, ChannelError>
4392 where
4393 L::Target: Logger,
4394 NS::Target: NodeSigner
4395 {
4396 if !self.context.channel_state.is_peer_disconnected() {
4397 // While BOLT 2 doesn't indicate explicitly we should error this channel here, it
4398 // almost certainly indicates we are going to end up out-of-sync in some way, so we
4399 // just close here instead of trying to recover.
4400 return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
4403 if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
4404 msg.next_local_commitment_number == 0 {
4405 return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned()));
4408 let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
4409 if msg.next_remote_commitment_number > 0 {
4410 let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
4411 let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
4412 .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
4413 if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
4414 return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
4416 if msg.next_remote_commitment_number > our_commitment_transaction {
4417 macro_rules! log_and_panic {
4418 ($err_msg: expr) => {
4419 log_error!(logger, $err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4420 panic!($err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4423 log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
4424 This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
4425 More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
4426 If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
4427 ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
4428 ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
4429 Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
4430 See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
4434 // Before we change the state of the channel, we check if the peer is sending a very old
4435 // commitment transaction number, if yes we send a warning message.
4436 if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
4437 return Err(ChannelError::Warn(format!(
4438 "Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)",
4439 msg.next_remote_commitment_number,
4440 our_commitment_transaction
4444 // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
4445 // remaining cases either succeed or ErrorMessage-fail).
4446 self.context.channel_state.clear_peer_disconnected();
4447 self.context.sent_message_awaiting_response = None;
4449 let shutdown_msg = self.get_outbound_shutdown();
4451 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger);
4453 if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(_)) {
4454 // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
4455 if !self.context.channel_state.is_our_channel_ready() ||
4456 self.context.channel_state.is_monitor_update_in_progress() {
4457 if msg.next_remote_commitment_number != 0 {
4458 return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
4460 // Short circuit the whole handler as there is nothing we can resend them
4461 return Ok(ReestablishResponses {
4462 channel_ready: None,
4463 raa: None, commitment_update: None,
4464 order: RAACommitmentOrder::CommitmentFirst,
4465 shutdown_msg, announcement_sigs,
4469 // We have OurChannelReady set!
4470 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4471 return Ok(ReestablishResponses {
4472 channel_ready: Some(msgs::ChannelReady {
4473 channel_id: self.context.channel_id(),
4474 next_per_commitment_point,
4475 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4477 raa: None, commitment_update: None,
4478 order: RAACommitmentOrder::CommitmentFirst,
4479 shutdown_msg, announcement_sigs,
4483 let required_revoke = if msg.next_remote_commitment_number == our_commitment_transaction {
4484 // Remote isn't waiting on any RevokeAndACK from us!
4485 // Note that if we need to repeat our ChannelReady we'll do that in the next if block.
4486 None
4487 } else if msg.next_remote_commitment_number + 1 == our_commitment_transaction {
4488 if self.context.channel_state.is_monitor_update_in_progress() {
4489 self.context.monitor_pending_revoke_and_ack = true;
4490 None
4491 } else {
4492 Some(self.get_last_revoke_and_ack())
4493 }
4494 } else {
4495 debug_assert!(false, "All values should have been handled in the four cases above");
4496 return Err(ChannelError::Close(format!(
4497 "Peer attempted to reestablish channel expecting a future local commitment transaction: {} (received) vs {} (expected)",
4498 msg.next_remote_commitment_number,
4499 our_commitment_transaction
4503 // We increment cur_counterparty_commitment_transaction_number only upon receipt of
4504 // revoke_and_ack, not on sending commitment_signed, so we add one if we have
4505 // AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
4506 // the corresponding revoke_and_ack back yet.
4507 let is_awaiting_remote_revoke = self.context.channel_state.is_awaiting_remote_revoke();
4508 if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
4509 self.mark_awaiting_response();
4511 let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };
4513 let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 {
4514 // We should never have to worry about MonitorUpdateInProgress resending ChannelReady
4515 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4516 Some(msgs::ChannelReady {
4517 channel_id: self.context.channel_id(),
4518 next_per_commitment_point,
4519 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4520 })
4521 } else { None };
4523 if msg.next_local_commitment_number == next_counterparty_commitment_number {
4524 if required_revoke.is_some() {
4525 log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id());
4527 log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id());
4530 Ok(ReestablishResponses {
4531 channel_ready, shutdown_msg, announcement_sigs,
4532 raa: required_revoke,
4533 commitment_update: None,
4534 order: self.context.resend_order.clone(),
4535 })
4536 } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
4537 if required_revoke.is_some() {
4538 log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id());
4540 log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id());
4543 if self.context.channel_state.is_monitor_update_in_progress() {
4544 self.context.monitor_pending_commitment_signed = true;
4545 Ok(ReestablishResponses {
4546 channel_ready, shutdown_msg, announcement_sigs,
4547 commitment_update: None, raa: None,
4548 order: self.context.resend_order.clone(),
4549 })
4550 } else {
4551 Ok(ReestablishResponses {
4552 channel_ready, shutdown_msg, announcement_sigs,
4553 raa: required_revoke,
4554 commitment_update: self.get_last_commitment_update_for_send(logger).ok(),
4555 order: self.context.resend_order.clone(),
4556 })
4557 }
4558 } else if msg.next_local_commitment_number < next_counterparty_commitment_number {
4559 Err(ChannelError::Close(format!(
4560 "Peer attempted to reestablish channel with a very old remote commitment transaction: {} (received) vs {} (expected)",
4561 msg.next_local_commitment_number,
4562 next_counterparty_commitment_number,
4563 )))
4564 } else {
4565 Err(ChannelError::Close(format!(
4566 "Peer attempted to reestablish channel with a future remote commitment transaction: {} (received) vs {} (expected)",
4567 msg.next_local_commitment_number,
4568 next_counterparty_commitment_number,
4569 )))
4570 }
4571 }
4573 /// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole
4574 /// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart,
4575 /// at which point they will be recalculated.
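///
/// For intuition (illustrative numbers only): with a 1_000 sat/kW minimum feerate and a
/// closing transaction weight of 700 WU, the minimum total fee computed below is
/// 1_000 * 700 / 1_000 = 700 sats.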
4576 fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>)
4577 -> (u64, u64)
4578 where F::Target: FeeEstimator
4579 {
4580 if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); }
4582 // Propose a range from our current Background feerate to our Normal feerate plus our
4583 // force_close_avoidance_max_fee_satoshis.
4584 // If we fail to come to consensus, we'll have to force-close.
4585 let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::ChannelCloseMinimum);
4586 // Use NonAnchorChannelFee because this should be an estimate for a channel close
4587 // that we don't expect to need fee bumping
4588 let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
4589 let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() };
4591 // The spec requires that (when the channel does not have anchors) we only send absolute
4592 // channel fees no greater than the absolute channel fee on the current commitment
4593 // transaction. It's unclear *which* commitment transaction this refers to, and there isn't a
4594 // very good reason to apply such a limit in any case. We don't bother doing so, risking
4595 // some force-closure by old nodes, but we wanted to close the channel anyway.
4597 if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw {
4598 let min_feerate = if self.context.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) };
4599 proposed_feerate = cmp::max(proposed_feerate, min_feerate);
4600 proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate);
4603 // Note that technically we could end up with a lower minimum fee if one sides' balance is
4604 // below our dust limit, causing the output to disappear. We don't bother handling this
4605 // case, however, as this should only happen if a channel is closed before any (material)
4606 // payments have been made on it. This may cause slight fee overpayment and/or failure to
4607 // come to consensus with our counterparty on appropriate fees, however it should be a
4608 // relatively rare case. We can revisit this later, though note that in order to determine
4609 // if the funders' output is dust we have to know the absolute fee we're going to use.
4610 let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
4611 let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
4612 let proposed_max_total_fee_satoshis = if self.context.is_outbound() {
4613 // We always add force_close_avoidance_max_fee_satoshis to our normal
4614 // feerate-calculated fee, but allow the max to be overridden if we're using a
4615 // target feerate-calculated fee.
4616 cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.context.config.options.force_close_avoidance_max_fee_satoshis,
4617 proposed_max_feerate as u64 * tx_weight / 1000)
4618 } else {
4619 self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000
4620 };
4622 self.context.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis));
4623 self.context.closing_fee_limits.clone().unwrap()
4626 /// Returns true if we're ready to commence the closing_signed negotiation phase. This is true
4627 /// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At
4628 /// this point if we're the funder we should send the initial closing_signed, and in any case
4629 /// shutdown should complete within a reasonable timeframe.
4630 fn closing_negotiation_ready(&self) -> bool {
4631 self.context.closing_negotiation_ready()
4634 /// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
4635 /// an Err if no progress is being made and the channel should be force-closed instead.
4636 /// Should be called on a one-minute timer.
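/// A caller sketch (the timer context is an assumption):
/// ```ignore
/// // From a once-per-minute timer:
/// if let Err(channel_err) = chan.timer_check_closing_negotiation_progress() {
///     // Negotiation has stalled; force-close the channel with `channel_err`.
/// }
/// ```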
4637 pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
4638 if self.closing_negotiation_ready() {
4639 if self.context.closing_signed_in_flight {
4640 return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
4641 } else {
4642 self.context.closing_signed_in_flight = true;
4643 }
4644 }
4645 Ok(())
4646 }
4648 pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
4649 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
4650 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
4651 where F::Target: FeeEstimator, L::Target: Logger
4653 // If we're waiting on a monitor persistence, that implies we're also waiting to send some
4654 // message to our counterparty (probably a `revoke_and_ack`). In such a case, we shouldn't
4655 // initiate `closing_signed` negotiation until we're clear of all pending messages. Note
4656 // that closing_negotiation_ready checks this case (as well as a few others).
4657 if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
4658 return Ok((None, None, None));
4661 if !self.context.is_outbound() {
4662 if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() {
4663 return self.closing_signed(fee_estimator, &msg);
4665 return Ok((None, None, None));
4668 // If we're waiting on a counterparty `commitment_signed` to clear some updates from our
4669 // local commitment transaction, we can't yet initiate `closing_signed` negotiation.
4670 if self.context.expecting_peer_commitment_signed {
4671 return Ok((None, None, None));
4674 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
4676 assert!(self.context.shutdown_scriptpubkey.is_some());
4677 let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false);
4678 log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)",
4679 our_min_fee, our_max_fee, total_fee_satoshis);
4681 match &self.context.holder_signer {
4682 ChannelSignerType::Ecdsa(ecdsa) => {
4683 let sig = ecdsa
4684 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
4685 .map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;
4687 self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
4688 Ok((Some(msgs::ClosingSigned {
4689 channel_id: self.context.channel_id,
4690 fee_satoshis: total_fee_satoshis,
4691 signature: sig,
4692 fee_range: Some(msgs::ClosingSignedFeeRange {
4693 min_fee_satoshis: our_min_fee,
4694 max_fee_satoshis: our_max_fee,
4695 }),
4696 }), None, None))
4697 },
4698 // TODO (taproot|arik)
4704 // Marks a channel as waiting for a response from the counterparty. If it's not received
4705 // [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] after sending our own to them, then we'll attempt
4706 // a reconnection.
4707 fn mark_awaiting_response(&mut self) {
4708 self.context.sent_message_awaiting_response = Some(0);
4711 /// Determines whether we should disconnect the counterparty due to not receiving a response
4712 /// within our expected timeframe.
4714 /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
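///
/// A caller sketch (the surrounding tick handler is an assumption):
/// ```ignore
/// if chan.should_disconnect_peer_awaiting_response() {
///     // Disconnect the peer; reconnection goes through channel_reestablish.
/// }
/// ```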
4715 pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool {
4716 let ticks_elapsed = if let Some(ticks_elapsed) = self.context.sent_message_awaiting_response.as_mut() {
4717 ticks_elapsed
4718 } else {
4719 // Don't disconnect when we're not waiting on a response.
4720 return false;
4721 };
4722 *ticks_elapsed += 1;
4723 *ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
4724 }
4726 pub fn shutdown(
4727 &mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
4728 ) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
4729 {
4730 if self.context.channel_state.is_peer_disconnected() {
4731 return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
4733 if self.context.channel_state.is_pre_funded_state() {
4734 // Spec says we should fail the connection, not the channel, but that's nonsense, there
4735 // are plenty of reasons you may want to fail a channel pre-funding, and spec says you
4736 // can do that via error message without getting a connection fail anyway...
4737 return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
4739 for htlc in self.context.pending_inbound_htlcs.iter() {
4740 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
4741 return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
4744 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
4746 if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
4747 return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_hex_string())));
4750 if self.context.counterparty_shutdown_scriptpubkey.is_some() {
4751 if Some(&msg.scriptpubkey) != self.context.counterparty_shutdown_scriptpubkey.as_ref() {
4752 return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_hex_string())));
4753 }
4754 } else {
4755 self.context.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
4756 }
4758 // If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
4759 // immediately after the commitment dance, but we can send a Shutdown because we won't send
4760 // any further commitment updates after we set LocalShutdownSent.
4761 let send_shutdown = !self.context.channel_state.is_local_shutdown_sent();
4763 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
4764 Some(_) => false,
4765 None => {
4766 assert!(send_shutdown);
4767 let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
4768 Ok(scriptpubkey) => scriptpubkey,
4769 Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
4771 if !shutdown_scriptpubkey.is_compatible(their_features) {
4772 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
4774 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
4775 true
4776 },
4777 };
4779 // From here on out, we may not fail!
4781 self.context.channel_state.set_remote_shutdown_sent();
4782 self.context.update_time_counter += 1;
4784 let monitor_update = if update_shutdown_script {
4785 self.context.latest_monitor_update_id += 1;
4786 let monitor_update = ChannelMonitorUpdate {
4787 update_id: self.context.latest_monitor_update_id,
4788 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
4789 scriptpubkey: self.get_closing_scriptpubkey(),
4792 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
4793 self.push_ret_blockable_mon_update(monitor_update)
4794 } else { None };
4795 let shutdown = if send_shutdown {
4796 Some(msgs::Shutdown {
4797 channel_id: self.context.channel_id,
4798 scriptpubkey: self.get_closing_scriptpubkey(),
4799 })
4800 } else { None };
4802 // We can't send our shutdown until we've committed all of our pending HTLCs, but the
4803 // remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
4804 // cell HTLCs and return them to fail the payment.
4805 self.context.holding_cell_update_fee = None;
4806 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
4807 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
4809 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
4810 dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
4817 self.context.channel_state.set_local_shutdown_sent();
4818 self.context.update_time_counter += 1;
4820 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
4823 fn build_signed_closing_transaction(&self, closing_tx: &ClosingTransaction, counterparty_sig: &Signature, sig: &Signature) -> Transaction {
4824 let mut tx = closing_tx.trust().built_transaction().clone();
4826 tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
4828 let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize();
4829 let counterparty_funding_key = self.context.counterparty_funding_pubkey().serialize();
4830 let mut holder_sig = sig.serialize_der().to_vec();
4831 holder_sig.push(EcdsaSighashType::All as u8);
4832 let mut cp_sig = counterparty_sig.serialize_der().to_vec();
4833 cp_sig.push(EcdsaSighashType::All as u8);
4834 if funding_key[..] < counterparty_funding_key[..] {
4835 tx.input[0].witness.push(holder_sig);
4836 tx.input[0].witness.push(cp_sig);
4838 tx.input[0].witness.push(cp_sig);
4839 tx.input[0].witness.push(holder_sig);
4842 tx.input[0].witness.push(self.context.get_funding_redeemscript().into_bytes());
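// Illustrative note (ours, not upstream): the completed witness for this input is
//   [ <empty>, <sig for the lexicographically-lesser funding key>,
//     <sig for the greater key>, <2-of-2 multisig redeemscript> ]
// The leading empty element absorbs OP_CHECKMULTISIG's extra stack pop, and the
// signature order must mirror the key order used by `make_funding_redeemscript`,
// which is exactly why the serialized keys are compared above.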
4846 pub fn closing_signed<F: Deref>(
4847 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned)
4848 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
4849 where F::Target: FeeEstimator
4851 if !self.context.channel_state.is_both_sides_shutdown() {
4852 return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
4854 if self.context.channel_state.is_peer_disconnected() {
4855 return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
4857 if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
4858 return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
4860 if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
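// (Sanity reference, ours: TOTAL_BITCOIN_SUPPLY_SATOSHIS is
// 21_000_000 BTC * 100_000_000 sat/BTC = 2_100_000_000_000_000 sats, so any
// larger fee could not possibly be claimable from the channel.)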
4861 return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
4864 if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() {
4865 return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
4868 if self.context.channel_state.is_monitor_update_in_progress() {
4869 self.context.pending_counterparty_closing_signed = Some(msg.clone());
4870 return Ok((None, None, None));
4873 let funding_redeemscript = self.context.get_funding_redeemscript();
4874 let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
4875 if used_total_fee != msg.fee_satoshis {
4876 return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
4878 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
4880 match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
4883 // The remote end may have decided to revoke their output due to inconsistent dust
4884 // limits, so check for that case by re-checking the signature here.
4885 closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0;
4886 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
4887 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.context.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
4891 for outp in closing_tx.trust().built_transaction().output.iter() {
4892 if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
4893 return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
4897 assert!(self.context.shutdown_scriptpubkey.is_some());
4898 if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
4899 if last_fee == msg.fee_satoshis {
4900 let shutdown_result = ShutdownResult {
4901 monitor_update: None,
4902 dropped_outbound_htlcs: Vec::new(),
4903 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
4904 channel_id: self.context.channel_id,
4905 counterparty_node_id: self.context.counterparty_node_id,
4907 let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
4908 self.context.channel_state = ChannelState::ShutdownComplete;
4909 self.context.update_time_counter += 1;
4910 return Ok((None, Some(tx), Some(shutdown_result)));
4914 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
4916 macro_rules! propose_fee {
4917 ($new_fee: expr) => {
4918 let (closing_tx, used_fee) = if $new_fee == msg.fee_satoshis {
4919 (closing_tx, $new_fee)
4921 self.build_closing_transaction($new_fee, false)
4924 return match &self.context.holder_signer {
4925 ChannelSignerType::Ecdsa(ecdsa) => {
4927 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
4928 .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
4929 let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
4930 let shutdown_result = ShutdownResult {
4931 monitor_update: None,
4932 dropped_outbound_htlcs: Vec::new(),
4933 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
4934 channel_id: self.context.channel_id,
4935 counterparty_node_id: self.context.counterparty_node_id,
4937 self.context.channel_state = ChannelState::ShutdownComplete;
4938 self.context.update_time_counter += 1;
4939 let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
4940 (Some(tx), Some(shutdown_result))
4945 self.context.last_sent_closing_fee = Some((used_fee, sig.clone()));
4946 Ok((Some(msgs::ClosingSigned {
4947 channel_id: self.context.channel_id,
4948 fee_satoshis: used_fee,
4950 fee_range: Some(msgs::ClosingSignedFeeRange {
4951 min_fee_satoshis: our_min_fee,
4952 max_fee_satoshis: our_max_fee,
4954 }), signed_tx, shutdown_result))
4956 // TODO (taproot|arik)
4963 if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range {
4964 if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis {
4965 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
4967 if max_fee_satoshis < our_min_fee {
4968 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee)));
4970 if min_fee_satoshis > our_max_fee {
4971 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee)));
4974 if !self.context.is_outbound() {
4975 // They have to pay, so pick the highest fee in the overlapping range.
4976 // We should never set an upper bound aside from their full balance
4977 debug_assert_eq!(our_max_fee, self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000);
4978 propose_fee!(cmp::min(max_fee_satoshis, our_max_fee));
4980 if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee {
4981 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
4982 msg.fee_satoshis, our_min_fee, our_max_fee)));
4984 // The proposed fee is in our acceptable range, accept it and broadcast!
4985 propose_fee!(msg.fee_satoshis);
4988 // Old fee style negotiation. We don't bother to enforce whether they are complying
4989 // with the "making progress" requirements; we just comply and hope for the best.
4990 if let Some((last_fee, _)) = self.context.last_sent_closing_fee {
4991 if msg.fee_satoshis > last_fee {
4992 if msg.fee_satoshis < our_max_fee {
4993 propose_fee!(msg.fee_satoshis);
4994 } else if last_fee < our_max_fee {
4995 propose_fee!(our_max_fee);
4997 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
5000 if msg.fee_satoshis > our_min_fee {
5001 propose_fee!(msg.fee_satoshis);
5002 } else if last_fee > our_min_fee {
5003 propose_fee!(our_min_fee);
5005 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
5009 if msg.fee_satoshis < our_min_fee {
5010 propose_fee!(our_min_fee);
5011 } else if msg.fee_satoshis > our_max_fee {
5012 propose_fee!(our_max_fee);
5014 propose_fee!(msg.fee_satoshis);
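// Worked example of the range-based path above (hypothetical numbers, ours):
// say our limits are our_min_fee = 1_000 and our_max_fee = 5_000 sat, and the
// funder proposes fee_satoshis = 8_000 with fee_range [2_000, 10_000]. As the
// non-funder we answer with propose_fee!(min(10_000, 5_000)) = 5_000 sat; if we
// were the funder instead, 8_000 would be rejected outright for falling outside
// the [1_000, 5_000] range we previously advertised.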
5020 fn internal_htlc_satisfies_config(
5021 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig,
5022 ) -> Result<(), (&'static str, u16)> {
5023 let fee = amt_to_forward.checked_mul(config.forwarding_fee_proportional_millionths as u64)
5024 .and_then(|prop_fee| (prop_fee / 1000000).checked_add(config.forwarding_fee_base_msat as u64));
5025 if fee.is_none() || htlc.amount_msat < fee.unwrap() ||
5026 (htlc.amount_msat - fee.unwrap()) < amt_to_forward {
5028 "Prior hop has deviated from specified fees parameters or origin node has obsolete ones",
5029 0x1000 | 12, // fee_insufficient
5032 if (htlc.cltv_expiry as u64) < outgoing_cltv_value as u64 + config.cltv_expiry_delta as u64 {
5034 "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
5035 0x1000 | 13, // incorrect_cltv_expiry
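// Worked example of the checks above (hypothetical config values, ours): with
// forwarding_fee_base_msat = 1_000 and forwarding_fee_proportional_millionths = 100,
// forwarding amt_to_forward = 1_000_000 msat requires
//   fee = 1_000_000 * 100 / 1_000_000 + 1_000 = 1_100 msat,
// so the inbound HTLC must carry amount_msat >= 1_001_100, and its cltv_expiry
// must be at least outgoing_cltv_value + cltv_expiry_delta.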
5041 /// Determines whether the parameters of an incoming HTLC to be forwarded satisfy the channel's
5042 /// [`ChannelConfig`]. This first looks at the channel's current [`ChannelConfig`], and if
5043 /// unsuccessful, falls back to the previous one if one exists.
5044 pub fn htlc_satisfies_config(
5045 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32,
5046 ) -> Result<(), (&'static str, u16)> {
5047 self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.context.config())
	.or_else(|err| {
5049 if let Some(prev_config) = self.context.prev_config() {
5050 self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &prev_config)
5057 pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
5058 self.context.cur_holder_commitment_transaction_number + 1
5061 pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
5062 self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state.is_awaiting_remote_revoke() { 1 } else { 0 }
5065 pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
5066 self.context.cur_counterparty_commitment_transaction_number + 2
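// Reading aid (ours): these counters count *down* from INITIAL_COMMITMENT_NUMBER
// as commitments are exchanged, with `cur_*` pointing at the commitment currently
// being negotiated. Hence `+ 1` above recovers the most recently signed commitment
// and `+ 2` the most recently *revoked* counterparty commitment.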
5070 pub fn get_signer(&self) -> &ChannelSignerType<SP> {
5071 &self.context.holder_signer
5075 pub fn get_value_stat(&self) -> ChannelValueStat {
5077 value_to_self_msat: self.context.value_to_self_msat,
5078 channel_value_msat: self.context.channel_value_satoshis * 1000,
5079 channel_reserve_msat: self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000,
5080 pending_outbound_htlcs_amount_msat: self.context.pending_outbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
5081 pending_inbound_htlcs_amount_msat: self.context.pending_inbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
5082 holding_cell_outbound_amount_msat: {
5084 for h in self.context.holding_cell_htlc_updates.iter() {
5086 &HTLCUpdateAwaitingACK::AddHTLC{amount_msat, .. } => {
5094 counterparty_max_htlc_value_in_flight_msat: self.context.counterparty_max_htlc_value_in_flight_msat,
5095 counterparty_dust_limit_msat: self.context.counterparty_dust_limit_satoshis * 1000,
5099 /// Returns true if this channel has been marked as awaiting a monitor update to move forward.
5100 /// Allowed in any state (including after shutdown)
5101 pub fn is_awaiting_monitor_update(&self) -> bool {
5102 self.context.channel_state.is_monitor_update_in_progress()
5105 /// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
5106 pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 {
5107 if self.context.blocked_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
5108 self.context.blocked_monitor_updates[0].update.update_id - 1
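// E.g. (ours): if `blocked_monitor_updates` currently holds updates with ids
// [7, 8], every update through id 6 has already been released to the user, so
// we report 7 - 1 = 6 as the latest unblocked id.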
5111 /// Returns the next blocked monitor update, if one exists, and a bool indicating whether
5112 /// a further blocked monitor update exists after the next.
5113 pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(ChannelMonitorUpdate, bool)> {
5114 if self.context.blocked_monitor_updates.is_empty() { return None; }
5115 Some((self.context.blocked_monitor_updates.remove(0).update,
5116 !self.context.blocked_monitor_updates.is_empty()))
5119 /// Pushes a new monitor update into our monitor update queue, returning it if it should be
5120 /// immediately given to the user for persisting or `None` if it should be held as blocked.
5121 fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
5122 -> Option<ChannelMonitorUpdate> {
5123 let release_monitor = self.context.blocked_monitor_updates.is_empty();
5124 if !release_monitor {
5125 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
5134 pub fn blocked_monitor_updates_pending(&self) -> usize {
5135 self.context.blocked_monitor_updates.len()
5138 /// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
5139 /// If the channel is outbound, this implies we have not yet broadcasted the funding
5140 /// transaction. If the channel is inbound, this implies simply that the channel has not advanced state.
5142 pub fn is_awaiting_initial_mon_persist(&self) -> bool {
5143 if !self.is_awaiting_monitor_update() { return false; }
5145 self.context.channel_state, ChannelState::AwaitingChannelReady(flags)
5146 if (flags & !(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY | FundedStateFlags::PEER_DISCONNECTED | FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS | AwaitingChannelReadyFlags::WAITING_FOR_BATCH)).is_empty()
5148 // If we're not a 0conf channel, we'll be waiting on a monitor update with only
5149 // AwaitingChannelReady set, though our peer could have sent their channel_ready.
5150 debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0);
5153 if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
5154 self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
5155 // If we're a 0-conf channel, we'll move beyond AwaitingChannelReady immediately even while
5156 // waiting for the initial monitor persistence. Thus, we check if our commitment
5157 // transaction numbers have both been iterated only exactly once (for the
5158 // funding_signed), and we're awaiting monitor update.
5160 // If we got here, we shouldn't have yet broadcasted the funding transaction (as the
5161 // only way to get an awaiting-monitor-update state during initial funding is if the
5162 // initial monitor persistence is still pending).
5164 // Because deciding we're awaiting initial broadcast spuriously could result in
5165 // funds-loss (as we don't have a monitor, but have the funding transaction confirmed),
5166 // we hard-assert here, even in production builds.
5167 if self.context.is_outbound() { assert!(self.context.funding_transaction.is_some()); }
5168 assert!(self.context.monitor_pending_channel_ready);
5169 assert_eq!(self.context.latest_monitor_update_id, 0);
5175 /// Returns true if our channel_ready has been sent
5176 pub fn is_our_channel_ready(&self) -> bool {
5177 matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY)) ||
5178 matches!(self.context.channel_state, ChannelState::ChannelReady(_))
5181 /// Returns true if our peer has either initiated or agreed to shut down the channel.
5182 pub fn received_shutdown(&self) -> bool {
5183 self.context.channel_state.is_remote_shutdown_sent()
5186 /// Returns true if we either initiated or agreed to shut down the channel.
5187 pub fn sent_shutdown(&self) -> bool {
5188 self.context.channel_state.is_local_shutdown_sent()
5191 /// Returns true if this channel is fully shut down. True here implies that no further actions
5192 /// may/will be taken on this channel, and thus this object should be freed. Any future changes
5193 /// will be handled appropriately by the chain monitor.
5194 pub fn is_shutdown(&self) -> bool {
5195 matches!(self.context.channel_state, ChannelState::ShutdownComplete)
5198 pub fn channel_update_status(&self) -> ChannelUpdateStatus {
5199 self.context.channel_update_status
5202 pub fn set_channel_update_status(&mut self, status: ChannelUpdateStatus) {
5203 self.context.update_time_counter += 1;
5204 self.context.channel_update_status = status;
5207 fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
// Called:
5209 //  * always when a new block/transactions are confirmed with the new height
5210 // * when funding is signed with a height of 0
5211 if self.context.funding_tx_confirmation_height == 0 && self.context.minimum_depth != Some(0) {
5215 let funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
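// Worked example (ours): if the funding tx confirmed at height 100 and `height`
// is also 100, we have 100 - 100 + 1 = 1 confirmation; a reorg that rewinds us
// below the confirmation height makes this non-positive, resetting our state below.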
5216 if funding_tx_confirmations <= 0 {
5217 self.context.funding_tx_confirmation_height = 0;
5220 if funding_tx_confirmations < self.context.minimum_depth.unwrap_or(0) as i64 {
5224 // If we're still pending the signature on a funding transaction, then we're not ready to send a
5225 // channel_ready yet.
5226 if self.context.signer_pending_funding {
5230 // Note that we don't include ChannelState::WaitingForBatch as we don't want to send
5231 // channel_ready until the entire batch is ready.
5232 let need_commitment_update = if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if (f & !FundedStateFlags::ALL).is_empty()) {
5233 self.context.channel_state.set_our_channel_ready();
5235 } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f & !FundedStateFlags::ALL == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY) {
5236 self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
5237 self.context.update_time_counter += 1;
5239 } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f & !FundedStateFlags::ALL == AwaitingChannelReadyFlags::OUR_CHANNEL_READY) {
5240 // We got a reorg but not enough to trigger a force close; just ignore.
5243 if self.context.funding_tx_confirmation_height != 0 &&
5244 self.context.channel_state < ChannelState::ChannelReady(ChannelReadyFlags::new())
5246 // We should never see a funding transaction on-chain until we've received
5247 // funding_signed (if we're an outbound channel), or seen funding_generated (if we're
5248 // an inbound channel - before that we have no known funding TXID). The fuzzer,
5249 // however, may do this and we shouldn't treat it as a bug.
5250 #[cfg(not(fuzzing))]
5251 panic!("Started confirming a channel in a state pre-AwaitingChannelReady: {}.\n\
5252 Do NOT broadcast a funding transaction manually - let LDK do it for you!",
5253 self.context.channel_state.to_u32());
5255 // We got a reorg but not enough to trigger a force close; just ignore.
5259 if need_commitment_update {
5260 if !self.context.channel_state.is_monitor_update_in_progress() {
5261 if !self.context.channel_state.is_peer_disconnected() {
5262 let next_per_commitment_point =
5263 self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
5264 return Some(msgs::ChannelReady {
5265 channel_id: self.context.channel_id,
5266 next_per_commitment_point,
5267 short_channel_id_alias: Some(self.context.outbound_scid_alias),
5271 self.context.monitor_pending_channel_ready = true;
5277 /// When a transaction is confirmed, we check whether it is or spends the funding transaction.
5278 /// In the first case, we store the confirmation height and calculate the short channel id.
5279 /// In the second, we simply return an Err indicating we need to be force-closed now.
5280 pub fn transactions_confirmed<NS: Deref, L: Deref>(
5281 &mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData,
5282 chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L
5283 ) -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5285 NS::Target: NodeSigner,
5288 let mut msgs = (None, None);
5289 if let Some(funding_txo) = self.context.get_funding_txo() {
5290 for &(index_in_block, tx) in txdata.iter() {
5291 // Check if the transaction is the expected funding transaction, and if it is,
5292 // check that it pays the right amount to the right script.
5293 if self.context.funding_tx_confirmation_height == 0 {
5294 if tx.txid() == funding_txo.txid {
5295 let txo_idx = funding_txo.index as usize;
5296 if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_v0_p2wsh() ||
5297 tx.output[txo_idx].value != self.context.channel_value_satoshis {
5298 if self.context.is_outbound() {
5299 // If we generated the funding transaction and it doesn't match what it
5300 // should, the client is really broken and we should just panic and
5301 // tell them off. That said, because hash collisions happen with high
5302 // probability in fuzzing mode, if we're fuzzing we just close the
5303 // channel and move on.
5304 #[cfg(not(fuzzing))]
5305 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
5307 self.context.update_time_counter += 1;
5308 let err_reason = "funding tx had wrong script/value or output index";
5309 return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() });
5311 if self.context.is_outbound() {
5312 if !tx.is_coin_base() {
5313 for input in tx.input.iter() {
5314 if input.witness.is_empty() {
5315 // We generated a malleable funding transaction, implying we've
5316 // just exposed ourselves to funds loss to our counterparty.
5317 #[cfg(not(fuzzing))]
5318 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
5323 self.context.funding_tx_confirmation_height = height;
5324 self.context.funding_tx_confirmed_in = Some(*block_hash);
5325 self.context.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
5326 Ok(scid) => Some(scid),
5327 Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
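// For reference (ours): a BOLT 7 short channel id packs the block height into
// the top 3 bytes, the transaction index within the block into the next 3, and
// the funding output index into the low 2, i.e. roughly:
//   scid = (height << 40) | (index_in_block << 16) | txo_idx
// which is why the panic above mentions the ~16 million and ~65k bounds.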
5330 // If this is a coinbase transaction and not a 0-conf channel,
5331 // we should update our min_depth to 100 to handle coinbase maturity.
5332 if tx.is_coin_base() &&
5333 self.context.minimum_depth.unwrap_or(0) > 0 &&
5334 self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
5335 self.context.minimum_depth = Some(COINBASE_MATURITY);
5338 // If we allow 1-conf funding, we may need to check for channel_ready here and
5339 // send it immediately instead of waiting for a best_block_updated call (which
5340 // may have already happened for this block).
5341 if let Some(channel_ready) = self.check_get_channel_ready(height) {
5342 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
5343 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger);
5344 msgs = (Some(channel_ready), announcement_sigs);
5347 for inp in tx.input.iter() {
5348 if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
5349 log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, &self.context.channel_id());
5350 return Err(ClosureReason::CommitmentTxConfirmed);
5358 /// When a new block is connected, we check the height of the block against outbound holding
5359 /// cell HTLCs in case we need to give up on them prematurely and time them out. Everything
5360 /// else (e.g. commitment transaction broadcasts, HTLC transaction broadcasting, etc) is
5361 /// handled by the ChannelMonitor.
5363 /// If we return Err, the channel may have been closed, at which point the standard
5364 /// requirements apply - no calls may be made except those explicitly stated to be allowed there.
5367 /// May return some HTLCs (and their payment_hash) which have timed out and should be failed backwards.
5369 pub fn best_block_updated<NS: Deref, L: Deref>(
5370 &mut self, height: u32, highest_header_time: u32, chain_hash: ChainHash,
5371 node_signer: &NS, user_config: &UserConfig, logger: &L
5372 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5374 NS::Target: NodeSigner,
5377 self.do_best_block_updated(height, highest_header_time, Some((chain_hash, node_signer, user_config)), logger)
5380 fn do_best_block_updated<NS: Deref, L: Deref>(
5381 &mut self, height: u32, highest_header_time: u32,
5382 chain_node_signer: Option<(ChainHash, &NS, &UserConfig)>, logger: &L
5383 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5385 NS::Target: NodeSigner,
5388 let mut timed_out_htlcs = Vec::new();
5389 // This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
5390 // forward an HTLC when our counterparty should almost certainly just fail it for expiring ~now.
5392 let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
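// E.g. (ours): at height 800_000 with a grace period of 3 blocks (whatever
// LATENCY_GRACE_PERIOD_BLOCKS is set to in this build), any holding-cell HTLC
// whose cltv_expiry is <= 800_003 is failed back below rather than forwarded.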
5393 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
5395 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
5396 if *cltv_expiry <= unforwarded_htlc_cltv_limit {
5397 timed_out_htlcs.push((source.clone(), payment_hash.clone()));
5405 self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);
5407 if let Some(channel_ready) = self.check_get_channel_ready(height) {
5408 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
5409 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
5411 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
5412 return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
5415 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
5416 self.context.channel_state.is_our_channel_ready() {
5417 let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
5418 if self.context.funding_tx_confirmation_height == 0 {
5419 // Note that check_get_channel_ready may reset funding_tx_confirmation_height to
5420 // zero if it has been reorged out; however, in either case, our state flags
5421 // indicate we've already sent a channel_ready.
5422 funding_tx_confirmations = 0;
5425 // If we've sent channel_ready (or have both sent and received channel_ready), and
5426 // the funding transaction has become unconfirmed,
5427 // close the channel and hope we can get the latest state on chain (because presumably
5428 // the funding transaction is at least still in the mempool of most nodes).
5430 // Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
5431 // 0-conf channel, but not doing so may lead to the
5432 // `ChannelManager::short_to_chan_info` map being inconsistent, so we currently have
5434 if funding_tx_confirmations == 0 && self.context.funding_tx_confirmed_in.is_some() {
5435 let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
5436 self.context.minimum_depth.unwrap(), funding_tx_confirmations);
5437 return Err(ClosureReason::ProcessingError { err: err_reason });
5439 } else if !self.context.is_outbound() && self.context.funding_tx_confirmed_in.is_none() &&
5440 height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
5441 log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id);
5442 // If funding_tx_confirmed_in is unset, the channel must not be active
5443 assert!(self.context.channel_state <= ChannelState::ChannelReady(ChannelReadyFlags::new()));
5444 assert!(!self.context.channel_state.is_our_channel_ready());
5445 return Err(ClosureReason::FundingTimedOut);
5448 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
5449 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
5451 Ok((None, timed_out_htlcs, announcement_sigs))
5454 /// Indicates the funding transaction is no longer confirmed in the main chain. This may
5455 /// force-close the channel, but may also indicate a harmless reorganization of a block or two
5456 /// before the channel has reached channel_ready and we can just wait for more blocks.
5457 pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
5458 if self.context.funding_tx_confirmation_height != 0 {
5459 // We handle the funding disconnection by calling best_block_updated with a height one
5460 // below where our funding was connected, implying a reorg back to conf_height - 1.
5461 let reorg_height = self.context.funding_tx_confirmation_height - 1;
5462 // We use the time field to bump the current time we set on channel updates if it's
5463 // larger. If we don't know that time has moved forward, we can just set it to the last
5464 // time we saw and it will be ignored.
5465 let best_time = self.context.update_time_counter;
5466 match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&dyn NodeSigner, &UserConfig)>, logger) {
5467 Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
5468 assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?");
5469 assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
5470 assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?");
5476 // We never learned about the funding confirmation anyway, just ignore
5481 // Methods to get unprompted messages to send to the remote end (or where we already returned
5482 // something in the handler for the message that prompted this message):
5484 /// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
5485 /// announceable and available for use (have exchanged [`ChannelReady`] messages in both
5486 /// directions). Should be used for both broadcasted announcements and in response to an
5487 /// AnnouncementSignatures message from the remote peer.
5489 /// Will only fail if we're not in a state where channel_announcement may be sent (including
5492 /// This will only return ChannelError::Ignore upon failure.
5494 /// [`ChannelReady`]: crate::ln::msgs::ChannelReady
5495 fn get_channel_announcement<NS: Deref>(
5496 &self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
5497 ) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5498 if !self.context.config.announced_channel {
5499 return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
5501 if !self.context.is_usable() {
5502 return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
5505 let short_channel_id = self.context.get_short_channel_id()
5506 .ok_or(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel has not been confirmed yet".to_owned()))?;
5507 let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5508 .map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
5509 let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
5510 let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();
5512 let msg = msgs::UnsignedChannelAnnouncement {
5513 features: channelmanager::provided_channel_features(&user_config),
5516 node_id_1: if were_node_one { node_id } else { counterparty_node_id },
5517 node_id_2: if were_node_one { counterparty_node_id } else { node_id },
5518 bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }),
5519 bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }),
5520 excess_data: Vec::new(),
5526 fn get_announcement_sigs<NS: Deref, L: Deref>(
5527 &mut self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
5528 best_block_height: u32, logger: &L
5529 ) -> Option<msgs::AnnouncementSignatures>
5531 NS::Target: NodeSigner,
5534 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
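// E.g. (ours): funding confirmed at height 100 means we return None until
// best_block_height reaches 105, i.e. six confirmations, matching the
// "required six confirmations" check in `announcement_signatures` below.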
5538 if !self.context.is_usable() {
5542 if self.context.channel_state.is_peer_disconnected() {
5543 log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
5547 if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent {
5551 log_trace!(logger, "Creating an announcement_signatures message for channel {}", &self.context.channel_id());
5552 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
5555 log_trace!(logger, "{:?}", e);
5559 let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) {
5561 log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!");
5566 match &self.context.holder_signer {
5567 ChannelSignerType::Ecdsa(ecdsa) => {
5568 let our_bitcoin_sig = match ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) {
5570 log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!");
5575 let short_channel_id = match self.context.get_short_channel_id() {
5577 None => return None,
5580 self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;
5582 Some(msgs::AnnouncementSignatures {
5583 channel_id: self.context.channel_id(),
5585 node_signature: our_node_sig,
5586 bitcoin_signature: our_bitcoin_sig,
5589 // TODO (taproot|arik)
5595 /// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are available.
5597 fn sign_channel_announcement<NS: Deref>(
5598 &self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement
5599 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5600 if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs {
5601 let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5602 .map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?);
5603 let were_node_one = announcement.node_id_1 == our_node_key;
5605 let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement))
5606 .map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?;
5607 match &self.context.holder_signer {
5608 ChannelSignerType::Ecdsa(ecdsa) => {
5609 let our_bitcoin_sig = ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx)
5610 .map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
5611 Ok(msgs::ChannelAnnouncement {
5612 node_signature_1: if were_node_one { our_node_sig } else { their_node_sig },
5613 node_signature_2: if were_node_one { their_node_sig } else { our_node_sig },
5614 bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig },
5615 bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
5616 contents: announcement,
5619 // TODO (taproot|arik)
5624 Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
5628 /// Processes an incoming announcement_signatures message, providing a fully-signed
5629 /// channel_announcement message which we can broadcast and storing our counterparty's
5630 /// signatures for later reconstruction/rebroadcast of the channel_announcement.
5631 pub fn announcement_signatures<NS: Deref>(
5632 &mut self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32,
5633 msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
5634 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5635 let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;
5637 let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);
5639 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
5640 return Err(ChannelError::Close(format!(
5641 "Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
5642 &announcement, self.context.get_counterparty_node_id())));
5644 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
5645 return Err(ChannelError::Close(format!(
5646 "Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
5647 &announcement, self.context.counterparty_funding_pubkey())));
5650 self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature));
5651 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5652 return Err(ChannelError::Ignore(
5653 "Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned()));
5656 self.sign_channel_announcement(node_signer, announcement)
5659 /// Gets a signed channel_announcement for this channel, if we previously received an
5660 /// announcement_signatures from our counterparty.
5661 pub fn get_signed_channel_announcement<NS: Deref>(
5662 &self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32, user_config: &UserConfig
5663 ) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
5664 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5667 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
5669 Err(_) => return None,
5671 match self.sign_channel_announcement(node_signer, announcement) {
5672 Ok(res) => Some(res),
5677 /// May panic if called on a channel that wasn't immediately-previously
5678 /// self.remove_uncommitted_htlcs_and_mark_paused()'d
5679 pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
5680 assert!(self.context.channel_state.is_peer_disconnected());
5681 assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
5682 // Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
5683 // current to_remote balances. However, it no longer has any use, and thus is now simply
5684 // set to a dummy (but valid, as required by the spec) public key.
5685 // fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey"
5686 // branches, but we unwrap it below, so we arbitrarily select a dummy pubkey which is
5687 // both a valid secp256k1 point and valid under fuzzing mode's arbitrary validity criteria:
5688 let mut pk = [2; 33]; pk[1] = 0xff;
5689 let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
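// (Encoding note, ours: a compressed secp256k1 pubkey is 33 bytes, a 0x02/0x03
// parity prefix followed by the 32-byte X coordinate. [2; 33] with pk[1] = 0xff
// yields an X coordinate that happens to lie on the curve, which the unwrap
// relies on.)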
5690 let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
5691 let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
5692 log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), &self.context.channel_id());
5695 log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", &self.context.channel_id());
5698 self.mark_awaiting_response();
5699 msgs::ChannelReestablish {
5700 channel_id: self.context.channel_id(),
5701 // The protocol has two different commitment number concepts - the "commitment
5702 // transaction number", which starts from 0 and counts up, and the "revocation key
5703 // index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
5704 // commitment transaction numbers by the index which will be used to reveal the
5705 // revocation key for that commitment transaction, which means we have to convert them
5706 // to protocol-level commitment numbers here...
5708 // next_local_commitment_number is the next commitment_signed number we expect to
5709 // receive (indicating if they need to resend one that we missed).
5710 next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
5711 // We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
5712 // receive, however we track it by the next commitment number for a remote transaction
5713 // (which is one further, as they always revoke previous commitment transaction, not
5714 // the one we send) so we have to decrement by 1. Note that if
5715 // cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
5716 // dropped this channel on disconnect as it hasn't yet reached AwaitingChannelReady, so we can't underflow here.
5718 next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
5719 your_last_per_commitment_secret: remote_last_secret,
5720 my_current_per_commitment_point: dummy_pubkey,
5721 // TODO(dual_funding): If we've sent `commitment_signed` for an interactive transaction
5722 // construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
5723 // txid of that interactive transaction, else we MUST NOT set it.
5724 next_funding_txid: None,
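// Worked example of the number conversion above (ours, with hypothetical counts):
// if cur_holder_commitment_transaction_number has been decremented five times from
// INITIAL_COMMITMENT_NUMBER, we send next_local_commitment_number = 5; if the
// counterparty counter has likewise been decremented five times, we send
// next_remote_commitment_number = 5 - 1 = 4, the extra -1 reflecting that they
// revoke the commitment *before* the one we most recently signed for them.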
5729 // Send stuff to our remote peers:
5731 /// Queues up an outbound HTLC to send by placing it in the holding cell. You should call
5732 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
5733 /// commitment update.
5735 /// `Err`s will only be [`ChannelError::Ignore`].
5736 pub fn queue_add_htlc<F: Deref, L: Deref>(
5737 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
5738 onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
5739 blinding_point: Option<PublicKey>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5740 ) -> Result<(), ChannelError>
5741 where F::Target: FeeEstimator, L::Target: Logger
5744 .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true,
5745 skimmed_fee_msat, blinding_point, fee_estimator, logger)
5746 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
5748 if let ChannelError::Ignore(_) = err { /* fine */ }
5749 else { debug_assert!(false, "Queueing cannot trigger channel failure"); }
5754 /// Adds a pending outbound HTLC to this channel; note that you probably want
5755 /// [`Self::send_htlc_and_commit`] instead, because you'll want both messages at once.
5757 /// This returns an optional UpdateAddHTLC, as we may be in a state where we cannot add HTLCs on the wire:
5759 /// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
5760 /// wouldn't be able to determine what they actually ACK'ed if we have two sets of updates
5762 /// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as
5763 /// we may not yet have sent the previous commitment update messages and will need to
5764 /// regenerate them.
5766 /// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods
5767 /// on this [`Channel`] if `force_holding_cell` is false.
5769 /// `Err`s will only be [`ChannelError::Ignore`].
5770 fn send_htlc<F: Deref, L: Deref>(
5771 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
5772 onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool,
5773 skimmed_fee_msat: Option<u64>, blinding_point: Option<PublicKey>,
5774 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5775 ) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
5776 where F::Target: FeeEstimator, L::Target: Logger
5778 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
5779 self.context.channel_state.is_local_shutdown_sent() ||
5780 self.context.channel_state.is_remote_shutdown_sent()
5782 return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
5784 let channel_total_msat = self.context.channel_value_satoshis * 1000;
5785 if amount_msat > channel_total_msat {
5786 return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat)));
5789 if amount_msat == 0 {
5790 return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
5793 let available_balances = self.context.get_available_balances(fee_estimator);
5794 if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
5795 return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
5796 available_balances.next_outbound_htlc_minimum_msat)));
5799 if amount_msat > available_balances.next_outbound_htlc_limit_msat {
5800 return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
5801 available_balances.next_outbound_htlc_limit_msat)));
5804 if self.context.channel_state.is_peer_disconnected() {
5805 // Note that this should never really happen: if we're !is_live(), receipt of an
5806 // incoming HTLC for relay will result in us rejecting the HTLC, and we won't allow
5807 // the user to send directly into a !is_live() channel. However, if we
5808 // disconnected during the time the previous hop was doing the commitment dance we may
5809 // end up getting here after the forwarding delay. In any case, returning an
5810 // IgnoreError will get ChannelManager to do the right thing and fail backwards now.
5811 return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
5814 let need_holding_cell = self.context.channel_state.should_force_holding_cell();
5815 log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}",
5816 payment_hash, amount_msat,
5817 if force_holding_cell { "into holding cell" }
5818 else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
5819 else { "to peer" });
5821 if need_holding_cell {
5822 force_holding_cell = true;
5825 // Now update local state:
5826 if force_holding_cell {
5827 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
5832 onion_routing_packet,
5839 self.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
5840 htlc_id: self.context.next_holder_htlc_id,
5842 payment_hash: payment_hash.clone(),
5844 state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
5850 let res = msgs::UpdateAddHTLC {
5851 channel_id: self.context.channel_id,
5852 htlc_id: self.context.next_holder_htlc_id,
5856 onion_routing_packet,
5860 self.context.next_holder_htlc_id += 1;
5865 fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
5866 log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
5867 // We can upgrade the status of some HTLCs that are waiting on a commitment; even if we
5868 // fail to generate this, we still are at least at a position where upgrading their status is acceptable.
5870 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
5871 let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state {
5872 Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
5874 if let Some(state) = new_state {
5875 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
5879 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
5880 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
5881 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
5882 // Grab the preimage, if it exists, instead of cloning
5883 let mut reason = OutboundHTLCOutcome::Success(None);
5884 mem::swap(outcome, &mut reason);
5885 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
5888 if let Some((feerate, update_state)) = self.context.pending_update_fee {
5889 if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
5890 debug_assert!(!self.context.is_outbound());
5891 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
5892 self.context.feerate_per_kw = feerate;
5893 self.context.pending_update_fee = None;
5896 self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst;
5898 let (mut htlcs_ref, counterparty_commitment_tx) =
5899 self.build_commitment_no_state_update(logger);
5900 let counterparty_commitment_txid = counterparty_commitment_tx.trust().txid();
5901 let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
5902 htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();
5904 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent {
5905 self.context.announcement_sigs_state = AnnouncementSigsState::Committed;
5908 self.context.latest_monitor_update_id += 1;
5909 let monitor_update = ChannelMonitorUpdate {
5910 update_id: self.context.latest_monitor_update_id,
5911 updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
5912 commitment_txid: counterparty_commitment_txid,
5913 htlc_outputs: htlcs.clone(),
5914 commitment_number: self.context.cur_counterparty_commitment_transaction_number,
5915 their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap(),
5916 feerate_per_kw: Some(counterparty_commitment_tx.feerate_per_kw()),
5917 to_broadcaster_value_sat: Some(counterparty_commitment_tx.to_broadcaster_value_sat()),
5918 to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()),
5921 self.context.channel_state.set_awaiting_remote_revoke();
5925 fn build_commitment_no_state_update<L: Deref>(&self, logger: &L)
5926 -> (Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>, CommitmentTransaction)
5927 where L::Target: Logger
5929 let counterparty_keys = self.context.build_remote_transaction_keys();
5930 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
5931 let counterparty_commitment_tx = commitment_stats.tx;
5933 #[cfg(any(test, fuzzing))]
5935 if !self.context.is_outbound() {
5936 let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
5937 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
5938 if let Some(info) = projected_commit_tx_info {
5939 let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len();
5940 if info.total_pending_htlcs == total_pending_htlcs
5941 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
5942 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
5943 && info.feerate == self.context.feerate_per_kw {
5944 let actual_fee = commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.get_channel_type());
5945 assert_eq!(actual_fee, info.fee);
5951 (commitment_stats.htlcs_included, counterparty_commitment_tx)
5954 /// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
5955 /// generation when we shouldn't change HTLC/channel state.
5956 fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
5957 // Get the fee tests from `build_commitment_no_state_update`
5958 #[cfg(any(test, fuzzing))]
5959 self.build_commitment_no_state_update(logger);
5961 let counterparty_keys = self.context.build_remote_transaction_keys();
5962 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
5963 let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
5965 match &self.context.holder_signer {
5966 ChannelSignerType::Ecdsa(ecdsa) => {
5967 let (signature, htlc_signatures);
5970 let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
5971 for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
5975 let res = ecdsa.sign_counterparty_commitment(
5976 &commitment_stats.tx,
5977 commitment_stats.inbound_htlc_preimages,
5978 commitment_stats.outbound_htlc_preimages,
5979 &self.context.secp_ctx,
5980 ).map_err(|_| ChannelError::Ignore("Failed to get signatures for new commitment_signed".to_owned()))?;
5982 htlc_signatures = res.1;
5984 log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
5985 encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
5986 &counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
5987 log_bytes!(signature.serialize_compact()[..]), &self.context.channel_id());
5989 for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
5990 log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
5991 encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, &self.context.channel_type, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
5992 encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &counterparty_keys)),
5993 log_bytes!(counterparty_keys.broadcaster_htlc_key.to_public_key().serialize()),
5994 log_bytes!(htlc_sig.serialize_compact()[..]), &self.context.channel_id());
5998 Ok((msgs::CommitmentSigned {
5999 channel_id: self.context.channel_id,
6003 partial_signature_with_nonce: None,
6004 }, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
6006 // TODO (taproot|arik)
6012 /// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
6013 /// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
6015 /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
6016 /// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
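///
/// # Example
///
/// A minimal, non-compiled sketch of a typical call (the `chan`, `payment_hash`,
/// `htlc_source`, `onion_packet`, `fee_estimator` and `logger` bindings are assumed
/// from the caller's context, not defined here):
///
/// ```ignore
/// let monitor_update_opt = chan.send_htlc_and_commit(
///     10_000,       // amount_msat
///     payment_hash,
///     500_000,      // cltv_expiry
///     htlc_source,
///     onion_packet,
///     None,         // skimmed_fee_msat
///     &fee_estimator,
///     &logger,
/// )?;
/// if let Some(monitor_update) = monitor_update_opt {
///     // Persist the `ChannelMonitorUpdate` before releasing the resulting
///     // commitment_signed to the peer.
/// }
/// ```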
6017 pub fn send_htlc_and_commit<F: Deref, L: Deref>(
6018 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32,
6019 source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
6020 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
6021 ) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
6022 where F::Target: FeeEstimator, L::Target: Logger
6024 let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
6025 onion_routing_packet, false, skimmed_fee_msat, None, fee_estimator, logger);
if let Err(e) = &send_res { debug_assert!(matches!(e, ChannelError::Ignore(_)), "Sending cannot trigger channel failure"); }
6029 let monitor_update = self.build_commitment_no_status_check(logger);
6030 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
6031 Ok(self.push_ret_blockable_mon_update(monitor_update))
/// Applies the `ChannelUpdate` and returns a boolean indicating whether a change actually
/// happened.
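///
/// A sketch of caller-side use (illustrative only; `chan` and `update_msg` are assumed
/// bindings):
///
/// ```ignore
/// if chan.channel_update(&update_msg)? {
///     // The counterparty's forwarding parameters changed; any cached routing
///     // hints built from them should be refreshed.
/// }
/// ```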
6039 pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<bool, ChannelError> {
6040 let new_forwarding_info = Some(CounterpartyForwardingInfo {
6041 fee_base_msat: msg.contents.fee_base_msat,
6042 fee_proportional_millionths: msg.contents.fee_proportional_millionths,
6043 cltv_expiry_delta: msg.contents.cltv_expiry_delta
6045 let did_change = self.context.counterparty_forwarding_info != new_forwarding_info;
6047 self.context.counterparty_forwarding_info = new_forwarding_info;
6053 /// Begins the shutdown process, getting a message for the remote peer and returning all
6054 /// holding cell HTLCs for payment failure.
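///
/// A non-compiled sketch of typical use (names assumed from the caller's context):
///
/// ```ignore
/// let (shutdown_msg, monitor_update_opt, dropped_htlcs) =
///     chan.get_shutdown(&signer_provider, &their_features, None, None)?;
/// // Send `shutdown_msg` to the peer, apply the monitor update if one was
/// // returned, and fail the `dropped_htlcs` back upstream.
/// ```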
6055 pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures,
6056 target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
6057 -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
6059 for htlc in self.context.pending_outbound_htlcs.iter() {
6060 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
6061 return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
6064 if self.context.channel_state.is_local_shutdown_sent() {
6065 return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
6067 else if self.context.channel_state.is_remote_shutdown_sent() {
6068 return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
6070 if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
6071 return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
6073 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
6074 if self.context.channel_state.is_peer_disconnected() || self.context.channel_state.is_monitor_update_in_progress() {
6075 return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
6078 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
6081 // use override shutdown script if provided
6082 let shutdown_scriptpubkey = match override_shutdown_script {
6083 Some(script) => script,
6085 // otherwise, use the shutdown scriptpubkey provided by the signer
6086 match signer_provider.get_shutdown_scriptpubkey() {
6087 Ok(scriptpubkey) => scriptpubkey,
6088 Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
6092 if !shutdown_scriptpubkey.is_compatible(their_features) {
6093 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
6095 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
6100 // From here on out, we may not fail!
6101 self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
6102 self.context.channel_state.set_local_shutdown_sent();
6103 self.context.update_time_counter += 1;
6105 let monitor_update = if update_shutdown_script {
6106 self.context.latest_monitor_update_id += 1;
6107 let monitor_update = ChannelMonitorUpdate {
6108 update_id: self.context.latest_monitor_update_id,
6109 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
6110 scriptpubkey: self.get_closing_scriptpubkey(),
6113 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
6114 self.push_ret_blockable_mon_update(monitor_update)
6116 let shutdown = msgs::Shutdown {
6117 channel_id: self.context.channel_id,
6118 scriptpubkey: self.get_closing_scriptpubkey(),
6121 // Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
6122 // our shutdown until we've committed all of the pending changes.
6123 self.context.holding_cell_update_fee = None;
6124 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
6125 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
6127 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
6128 dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
6135 debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
6136 "we can't both complete shutdown and return a monitor update");
6138 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
6141 pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
6142 self.context.holding_cell_htlc_updates.iter()
6143 .flat_map(|htlc_update| {
6145 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
6146 => Some((source, payment_hash)),
6150 .chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
6154 /// A not-yet-funded outbound (from holder) channel using V1 channel establishment.
6155 pub(super) struct OutboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
6156 pub context: ChannelContext<SP>,
6157 pub unfunded_context: UnfundedChannelContext,
6160 impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
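/// Creates a new outbound channel, to be proposed to the peer with an `open_channel`
/// message. A hedged construction sketch (non-compiled; all argument values are
/// illustrative, not recommendations):
///
/// ```ignore
/// let chan = OutboundV1Channel::new(
///     &fee_estimator, &entropy_source, &signer_provider, counterparty_node_id,
///     &their_features,
///     1_000_000, // channel_value_satoshis
///     0,         // push_msat
///     42,        // user_id
///     &config, current_chain_height, outbound_scid_alias,
///     None,      // temporary_channel_id: derive one from the entropy source
/// )?;
/// let open_channel_msg = chan.get_open_channel(chain_hash);
/// ```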
6161 pub fn new<ES: Deref, F: Deref>(
6162 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
6163 channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
6164 outbound_scid_alias: u64, temporary_channel_id: Option<ChannelId>
6165 ) -> Result<OutboundV1Channel<SP>, APIError>
6166 where ES::Target: EntropySource,
6167 F::Target: FeeEstimator
6169 let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
6170 let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
6171 let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
6172 let pubkeys = holder_signer.pubkeys().clone();
6174 if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
6175 return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
6177 if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
6178 return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
6180 let channel_value_msat = channel_value_satoshis * 1000;
6181 if push_msat > channel_value_msat {
6182 return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
6184 if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk", holder_selected_contest_delay)});
6187 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
6188 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
// Protocol-level safety check in place; this should never happen because of
// `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below implementation limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
6194 let channel_type = Self::get_initial_channel_type(&config, their_features);
6195 debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
6197 let (commitment_conf_target, anchor_outputs_value_msat) = if channel_type.supports_anchors_zero_fee_htlc_tx() {
6198 (ConfirmationTarget::AnchorChannelFee, ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000)
6200 (ConfirmationTarget::NonAnchorChannelFee, 0)
6202 let commitment_feerate = fee_estimator.bounded_sat_per_1000_weight(commitment_conf_target);
6204 let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
6205 let commitment_tx_fee = commit_tx_fee_msat(commitment_feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
6206 if value_to_self_msat.saturating_sub(anchor_outputs_value_msat) < commitment_tx_fee {
return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay the fee for the initial commitment transaction ({}).", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
6210 let mut secp_ctx = Secp256k1::new();
6211 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
6213 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
6214 match signer_provider.get_shutdown_scriptpubkey() {
6215 Ok(scriptpubkey) => Some(scriptpubkey),
6216 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
6220 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
6221 if !shutdown_scriptpubkey.is_compatible(&their_features) {
6222 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
6226 let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
6227 Ok(script) => script,
6228 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
6231 let temporary_channel_id = temporary_channel_id.unwrap_or_else(|| ChannelId::temporary_from_entropy_source(entropy_source));
6234 context: ChannelContext {
6237 config: LegacyChannelConfig {
6238 options: config.channel_config.clone(),
6239 announced_channel: config.channel_handshake_config.announced_channel,
6240 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
6245 inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),
6247 channel_id: temporary_channel_id,
6248 temporary_channel_id: Some(temporary_channel_id),
6249 channel_state: ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT),
6250 announcement_sigs_state: AnnouncementSigsState::NotSent,
6252 channel_value_satoshis,
6254 latest_monitor_update_id: 0,
6256 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
6257 shutdown_scriptpubkey,
6260 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6261 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6264 pending_inbound_htlcs: Vec::new(),
6265 pending_outbound_htlcs: Vec::new(),
6266 holding_cell_htlc_updates: Vec::new(),
6267 pending_update_fee: None,
6268 holding_cell_update_fee: None,
6269 next_holder_htlc_id: 0,
6270 next_counterparty_htlc_id: 0,
6271 update_time_counter: 1,
6273 resend_order: RAACommitmentOrder::CommitmentFirst,
6275 monitor_pending_channel_ready: false,
6276 monitor_pending_revoke_and_ack: false,
6277 monitor_pending_commitment_signed: false,
6278 monitor_pending_forwards: Vec::new(),
6279 monitor_pending_failures: Vec::new(),
6280 monitor_pending_finalized_fulfills: Vec::new(),
6282 signer_pending_commitment_update: false,
6283 signer_pending_funding: false,
6285 #[cfg(debug_assertions)]
6286 holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
6287 #[cfg(debug_assertions)]
6288 counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
6290 last_sent_closing_fee: None,
6291 pending_counterparty_closing_signed: None,
6292 expecting_peer_commitment_signed: false,
6293 closing_fee_limits: None,
6294 target_closing_feerate_sats_per_kw: None,
6296 funding_tx_confirmed_in: None,
6297 funding_tx_confirmation_height: 0,
6298 short_channel_id: None,
6299 channel_creation_height: current_chain_height,
6301 feerate_per_kw: commitment_feerate,
6302 counterparty_dust_limit_satoshis: 0,
6303 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
6304 counterparty_max_htlc_value_in_flight_msat: 0,
6305 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
6306 counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel
6307 holder_selected_channel_reserve_satoshis,
6308 counterparty_htlc_minimum_msat: 0,
6309 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
6310 counterparty_max_accepted_htlcs: 0,
6311 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
6312 minimum_depth: None, // Filled in in accept_channel
6314 counterparty_forwarding_info: None,
6316 channel_transaction_parameters: ChannelTransactionParameters {
6317 holder_pubkeys: pubkeys,
6318 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
6319 is_outbound_from_holder: true,
6320 counterparty_parameters: None,
6321 funding_outpoint: None,
6322 channel_type_features: channel_type.clone()
6324 funding_transaction: None,
6325 is_batch_funding: None,
6327 counterparty_cur_commitment_point: None,
6328 counterparty_prev_commitment_point: None,
6329 counterparty_node_id,
6331 counterparty_shutdown_scriptpubkey: None,
6333 commitment_secrets: CounterpartyCommitmentSecrets::new(),
6335 channel_update_status: ChannelUpdateStatus::Enabled,
6336 closing_signed_in_flight: false,
6338 announcement_sigs: None,
6340 #[cfg(any(test, fuzzing))]
6341 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
6342 #[cfg(any(test, fuzzing))]
6343 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
6345 workaround_lnd_bug_4006: None,
6346 sent_message_awaiting_response: None,
6348 latest_inbound_scid_alias: None,
6349 outbound_scid_alias,
6351 channel_pending_event_emitted: false,
6352 channel_ready_event_emitted: false,
6354 #[cfg(any(test, fuzzing))]
6355 historical_inbound_htlc_fulfills: HashSet::new(),
6360 blocked_monitor_updates: Vec::new(),
6362 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
6366 /// Only allowed after [`ChannelContext::channel_transaction_parameters`] is set.
6367 fn get_funding_created_msg<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
6368 let counterparty_keys = self.context.build_remote_transaction_keys();
6369 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
6370 let signature = match &self.context.holder_signer {
6371 // TODO (taproot|arik): move match into calling method for Taproot
6372 ChannelSignerType::Ecdsa(ecdsa) => {
6373 ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.context.secp_ctx)
6374 .map(|(sig, _)| sig).ok()?
6376 // TODO (taproot|arik)
6381 if self.context.signer_pending_funding {
6382 log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding");
6383 self.context.signer_pending_funding = false;
6386 Some(msgs::FundingCreated {
6387 temporary_channel_id: self.context.temporary_channel_id.unwrap(),
6388 funding_txid: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().txid,
6389 funding_output_index: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().index,
6392 partial_signature_with_nonce: None,
6394 next_local_nonce: None,
6398 /// Updates channel state with knowledge of the funding transaction's txid/index, and generates
6399 /// a funding_created message for the remote peer.
6400 /// Panics if called at some time other than immediately after initial handshake, if called twice,
6401 /// or if called on an inbound channel.
6402 /// Note that channel_id changes during this call!
6403 /// Do NOT broadcast the funding transaction until after a successful funding_signed call!
6404 /// If an Err is returned, it is a ChannelError::Close.
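///
/// A sketch of the expected sequence (non-compiled; `chan`, `funding_tx`, `funding_txo`
/// and `logger` are assumed bindings):
///
/// ```ignore
/// let funding_created_opt = chan
///     .get_funding_created(funding_tx.clone(), funding_txo, false, &logger)
///     .map_err(|(_chan, err)| err)?;
/// // Send the message (if the signer produced one) to the peer. Only broadcast
/// // `funding_tx` once the peer's `funding_signed` has been handled successfully.
/// ```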
6405 pub fn get_funding_created<L: Deref>(&mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
6406 -> Result<Option<msgs::FundingCreated>, (Self, ChannelError)> where L::Target: Logger {
6407 if !self.context.is_outbound() {
6408 panic!("Tried to create outbound funding_created message on an inbound channel!");
6411 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
6412 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
6414 panic!("Tried to get a funding_created messsage at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
6416 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
6417 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
6418 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6419 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
6422 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
6423 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
// Now that we're past the error-generating checks, update our local state:
6427 self.context.channel_state = ChannelState::FundingNegotiated;
6428 self.context.channel_id = funding_txo.to_channel_id();
6430 // If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
6431 // We can skip this if it is a zero-conf channel.
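// (Illustrative: a 0conf channel has `minimum_depth == Some(0)`, so the check below
// is skipped; any other coinbase-funded channel is bumped to the full coinbase
// maturity of 100 blocks.)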
6432 if funding_transaction.is_coin_base() &&
6433 self.context.minimum_depth.unwrap_or(0) > 0 &&
6434 self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
6435 self.context.minimum_depth = Some(COINBASE_MATURITY);
6438 self.context.funding_transaction = Some(funding_transaction);
6439 self.context.is_batch_funding = Some(()).filter(|_| is_batch_funding);
6441 let funding_created = self.get_funding_created_msg(logger);
6442 if funding_created.is_none() {
6443 if !self.context.signer_pending_funding {
6444 log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
6445 self.context.signer_pending_funding = true;
6452 fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
// The default channel type (i.e. the first one we try) depends on whether the channel is
// public - if it is, we just go with `only_static_remotekey` as it's the only option
// available. If it's private, we first try `scid_privacy` as it provides better privacy
// with no other changes, and fall back to `only_static_remotekey`.
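// Illustratively: a private channel where the peer advertises `scid_privacy` (and the
// user enabled `negotiate_scid_privacy`) is first proposed as
// `static_remote_key | scid_privacy`, while a public channel without anchor
// negotiation is proposed as plain `static_remote_key`.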
6457 let mut ret = ChannelTypeFeatures::only_static_remote_key();
6458 if !config.channel_handshake_config.announced_channel &&
6459 config.channel_handshake_config.negotiate_scid_privacy &&
6460 their_features.supports_scid_privacy() {
6461 ret.set_scid_privacy_required();
6464 // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
6465 // set it now. If they don't understand it, we'll fall back to our default of
6466 // `only_static_remotekey`.
6467 if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
6468 their_features.supports_anchors_zero_fee_htlc_tx() {
6469 ret.set_anchors_zero_fee_htlc_tx_required();
6475 /// If we receive an error message, it may only be a rejection of the channel type we tried,
6476 /// not of our ability to open any channel at all. Thus, on error, we should first call this
6477 /// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
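///
/// A caller-side sketch of the retry loop this enables (non-compiled; `chan`,
/// `chain_hash`, `fee_estimator` and `send_to_peer` are assumed names):
///
/// ```ignore
/// match chan.maybe_handle_error_without_close(chain_hash, &fee_estimator) {
///     Ok(open_channel_msg) => {
///         // Retry the handshake with the downgraded channel type.
///         send_to_peer(open_channel_msg);
///     },
///     Err(()) => {
///         // We already proposed our most basic channel type; fail the channel.
///     },
/// }
/// ```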
6478 pub(crate) fn maybe_handle_error_without_close<F: Deref>(
6479 &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F>
6480 ) -> Result<msgs::OpenChannel, ()>
6482 F::Target: FeeEstimator
6484 if !self.context.is_outbound() ||
6486 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
6487 if flags == NegotiatingFundingFlags::OUR_INIT_SENT
6492 if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() {
6493 // We've exhausted our options
6496 // We support opening a few different types of channels. Try removing our additional
// features one by one until we've either arrived at our default or the counterparty has accepted one.
6500 // Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
6501 // counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
6502 // checks whether the counterparty supports every feature, this would only happen if the
// counterparty is advertising the feature, but rejecting channels proposing the feature for some reason.
6505 if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
6506 self.context.channel_type.clear_anchors_zero_fee_htlc_tx();
6507 self.context.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
6508 assert!(!self.context.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
6509 } else if self.context.channel_type.supports_scid_privacy() {
6510 self.context.channel_type.clear_scid_privacy();
6512 self.context.channel_type = ChannelTypeFeatures::only_static_remote_key();
6514 self.context.channel_transaction_parameters.channel_type_features = self.context.channel_type.clone();
6515 Ok(self.get_open_channel(chain_hash))
6518 pub fn get_open_channel(&self, chain_hash: ChainHash) -> msgs::OpenChannel {
6519 if !self.context.is_outbound() {
6520 panic!("Tried to open a channel for an inbound channel?");
6522 if self.context.have_received_message() {
6523 panic!("Cannot generate an open_channel after we've moved forward");
6526 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6527 panic!("Tried to send an open_channel for a channel that has already advanced");
6530 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
6531 let keys = self.context.get_holder_pubkeys();
6535 temporary_channel_id: self.context.channel_id,
6536 funding_satoshis: self.context.channel_value_satoshis,
6537 push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat,
6538 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
6539 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
6540 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
6541 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
6542 feerate_per_kw: self.context.feerate_per_kw as u32,
6543 to_self_delay: self.context.get_holder_selected_contest_delay(),
6544 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
6545 funding_pubkey: keys.funding_pubkey,
6546 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
6547 payment_point: keys.payment_point,
6548 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
6549 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
6550 first_per_commitment_point,
6551 channel_flags: if self.context.config.announced_channel {1} else {0},
6552 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
6553 Some(script) => script.clone().into_inner(),
6554 None => Builder::new().into_script(),
6556 channel_type: Some(self.context.channel_type.clone()),
6561 pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
6562 let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };
6564 // Check sanity of message fields:
6565 if !self.context.is_outbound() {
6566 return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
6568 if !matches!(self.context.channel_state, ChannelState::NegotiatingFunding(flags) if flags == NegotiatingFundingFlags::OUR_INIT_SENT) {
6569 return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
if msg.dust_limit_satoshis > 21_000_000 * 100_000_000 {
6572 return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.dust_limit_satoshis)));
6574 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
6575 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
6577 if msg.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
6578 return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
6580 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
6581 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
6582 msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
6584 let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
6585 if msg.htlc_minimum_msat >= full_channel_value_msat {
6586 return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
6588 let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
6589 if msg.to_self_delay > max_delay_acceptable {
6590 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.to_self_delay)));
6592 if msg.max_accepted_htlcs < 1 {
6593 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
6595 if msg.max_accepted_htlcs > MAX_HTLCS {
6596 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
6599 // Now check against optional parameters as set by config...
6600 if msg.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
6601 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
6603 if msg.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
6604 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
6606 if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
6607 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
6609 if msg.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
6610 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
6612 if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6613 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6615 if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
6616 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
6618 if msg.minimum_depth > peer_limits.max_minimum_depth {
6619 return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.minimum_depth)));
6622 if let Some(ty) = &msg.channel_type {
6623 if *ty != self.context.channel_type {
6624 return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
6626 } else if their_features.supports_channel_type() {
6627 // Assume they've accepted the channel type as they said they understand it.
6629 let channel_type = ChannelTypeFeatures::from_init(&their_features);
6630 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
6631 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
6633 self.context.channel_type = channel_type.clone();
6634 self.context.channel_transaction_parameters.channel_type_features = channel_type;
6637 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
6638 match &msg.shutdown_scriptpubkey {
6639 &Some(ref script) => {
// Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
6641 if script.len() == 0 {
6644 if !script::is_bolt2_compliant(&script, their_features) {
6645 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
6647 Some(script.clone())
// Peer is signaling upfront_shutdown but didn't opt out with the correct mechanism (a 0-length script). The peer looks buggy, so we fail the channel
return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we didn't receive a script. Use a 0-length script to opt out".to_owned()));
6657 self.context.counterparty_dust_limit_satoshis = msg.dust_limit_satoshis;
6658 self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
6659 self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
6660 self.context.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat;
6661 self.context.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs;
6663 if peer_limits.trust_own_funding_0conf {
6664 self.context.minimum_depth = Some(msg.minimum_depth);
6666 self.context.minimum_depth = Some(cmp::max(1, msg.minimum_depth));
6669 let counterparty_pubkeys = ChannelPublicKeys {
6670 funding_pubkey: msg.funding_pubkey,
6671 revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
6672 payment_point: msg.payment_point,
6673 delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
6674 htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
6677 self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
6678 selected_contest_delay: msg.to_self_delay,
6679 pubkeys: counterparty_pubkeys,
6682 self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point);
6683 self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
6685 self.context.channel_state = ChannelState::NegotiatingFunding(
6686 NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
6688 self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
6693 /// Handles a funding_signed message from the remote end.
6694 /// If this call is successful, broadcast the funding transaction (and not before!)
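///
/// A minimal sketch of handling the result (non-compiled; on failure the still-unfunded
/// channel is handed back to the caller):
///
/// ```ignore
/// match outbound_chan.funding_signed(&funding_signed_msg, best_block, &signer_provider, &logger) {
///     Ok((funded_chan, channel_monitor)) => {
///         // Persist `channel_monitor`, then broadcast the funding transaction.
///     },
///     Err((unfunded_chan, err)) => {
///         // Handle `err`; `unfunded_chan` can be kept around or closed.
///     },
/// }
/// ```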
6695 pub fn funding_signed<L: Deref>(
6696 mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
6697 ) -> Result<(Channel<SP>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (OutboundV1Channel<SP>, ChannelError)>
6701 if !self.context.is_outbound() {
6702 return Err((self, ChannelError::Close("Received funding_signed for an inbound channel?".to_owned())));
6704 if !matches!(self.context.channel_state, ChannelState::FundingNegotiated) {
6705 return Err((self, ChannelError::Close("Received funding_signed in strange state!".to_owned())));
6707 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
6708 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
6709 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6710 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
6713 let funding_script = self.context.get_funding_redeemscript();
6715 let counterparty_keys = self.context.build_remote_transaction_keys();
6716 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
6717 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
6718 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
6720 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
6721 &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
6723 let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
6724 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
6726 let trusted_tx = initial_commitment_tx.trust();
6727 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
6728 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
6729 // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
6730 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
6731 return Err((self, ChannelError::Close("Invalid funding_signed signature from peer".to_owned())));
6735 let holder_commitment_tx = HolderCommitmentTransaction::new(
6736 initial_commitment_tx,
6739 &self.context.get_holder_pubkeys().funding_pubkey,
6740 self.context.counterparty_funding_pubkey()
6744 self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new());
6745 if validated.is_err() {
6746 return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
6749 let funding_redeemscript = self.context.get_funding_redeemscript();
6750 let funding_txo = self.context.get_funding_txo().unwrap();
6751 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
6752 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
6753 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
6754 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
6755 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
6756 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
6757 shutdown_script, self.context.get_holder_selected_contest_delay(),
6758 &self.context.destination_script, (funding_txo, funding_txo_script),
6759 &self.context.channel_transaction_parameters,
6760 funding_redeemscript.clone(), self.context.channel_value_satoshis,
6762 holder_commitment_tx, best_block, self.context.counterparty_node_id);
6763 channel_monitor.provide_initial_counterparty_commitment_tx(
6764 counterparty_initial_bitcoin_tx.txid, Vec::new(),
6765 self.context.cur_counterparty_commitment_transaction_number,
6766 self.context.counterparty_cur_commitment_point.unwrap(),
6767 counterparty_initial_commitment_tx.feerate_per_kw(),
6768 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
6769 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
assert!(!self.context.channel_state.is_monitor_update_in_progress()); // We have not had any monitor(s) yet, so no update could be in progress!
6772 if self.context.is_batch_funding() {
6773 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH);
6775 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
6777 self.context.cur_holder_commitment_transaction_number -= 1;
6778 self.context.cur_counterparty_commitment_transaction_number -= 1;
6780 log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
6782 let mut channel = Channel { context: self.context };
6784 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
6785 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
6786 Ok((channel, channel_monitor))
/// Indicates that the signer may have some signatures for us, so we should retry if we're
/// blocked.
6792 pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
6793 if self.context.signer_pending_funding && self.context.is_outbound() {
6794 log_trace!(logger, "Signer unblocked a funding_created");
6795 self.get_funding_created_msg(logger)
6800 /// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
6801 pub(super) struct InboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
6802 pub context: ChannelContext<SP>,
6803 pub unfunded_context: UnfundedChannelContext,
6806 impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
/// Creates a new channel from a remote side's request for one.
6808 /// Assumes chain_hash has already been checked and corresponds with what we expect!
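///
/// A hedged construction sketch (non-compiled; bindings assumed from the caller's
/// context):
///
/// ```ignore
/// let mut chan = InboundV1Channel::new(
///     &fee_estimator, &entropy_source, &signer_provider, counterparty_node_id,
///     &our_supported_features, &their_features, &open_channel_msg, user_id,
///     &config, current_chain_height, &logger,
///     false, // is_0conf
/// )?;
/// let accept_channel_msg = chan.accept_inbound_channel();
/// ```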
6809 pub fn new<ES: Deref, F: Deref, L: Deref>(
6810 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
6811 counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
6812 their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
6813 current_chain_height: u32, logger: &L, is_0conf: bool,
6814 ) -> Result<InboundV1Channel<SP>, ChannelError>
6815 where ES::Target: EntropySource,
6816 F::Target: FeeEstimator,
6819 let logger = WithContext::from(logger, Some(counterparty_node_id), Some(msg.temporary_channel_id));
let announced_channel = (msg.channel_flags & 1) == 1;
6822 // First check the channel type is known, failing before we do anything else if we don't
6823 // support this channel type.
6824 let channel_type = if let Some(channel_type) = &msg.channel_type {
6825 if channel_type.supports_any_optional_bits() {
6826 return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
6829 // We only support the channel types defined by the `ChannelManager` in
6830 // `provided_channel_type_features`. The channel type must always support
6831 // `static_remote_key`.
6832 if !channel_type.requires_static_remote_key() {
6833 return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
6835 // Make sure we support all of the features behind the channel type.
6836 if !channel_type.is_subset(our_supported_features) {
6837 return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
6839 if channel_type.requires_scid_privacy() && announced_channel {
6840 return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
6842 channel_type.clone()
6844 let channel_type = ChannelTypeFeatures::from_init(&their_features);
6845 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
6846 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
6851 let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id);
6852 let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id);
6853 let pubkeys = holder_signer.pubkeys().clone();
6854 let counterparty_pubkeys = ChannelPublicKeys {
6855 funding_pubkey: msg.funding_pubkey,
6856 revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
6857 payment_point: msg.payment_point,
6858 delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
6859 htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
6862 if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
6866 // Check sanity of message fields:
6867 if msg.funding_satoshis > config.channel_handshake_limits.max_funding_satoshis {
6868 return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.channel_handshake_limits.max_funding_satoshis, msg.funding_satoshis)));
6870 if msg.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
6871 return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.funding_satoshis)));
6873 if msg.channel_reserve_satoshis > msg.funding_satoshis {
return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.funding_satoshis)));
6876 let full_channel_value_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000;
6877 if msg.push_msat > full_channel_value_msat {
6878 return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg.push_msat, full_channel_value_msat)));
6880 if msg.dust_limit_satoshis > msg.funding_satoshis {
6881 return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than funding_satoshis {}. Peer never wants payout outputs?", msg.dust_limit_satoshis, msg.funding_satoshis)));
6883 if msg.htlc_minimum_msat >= full_channel_value_msat {
6884 return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
6886 Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, msg.feerate_per_kw, None, &&logger)?;
6888 let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
6889 if msg.to_self_delay > max_counterparty_selected_contest_delay {
6890 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, msg.to_self_delay)));
6892 if msg.max_accepted_htlcs < 1 {
6893 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
6895 if msg.max_accepted_htlcs > MAX_HTLCS {
6896 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
6899 // Now check against optional parameters as set by config...
6900 if msg.funding_satoshis < config.channel_handshake_limits.min_funding_satoshis {
6901 return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", msg.funding_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
6903 if msg.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
6904 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
6906 if msg.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
6907 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
6909 if msg.channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
6910 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
6912 if msg.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
6913 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
6915 if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6916 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6918 if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
6919 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
6922 // Convert things into internal flags and prep our state:
6924 if config.channel_handshake_limits.force_announced_channel_preference {
6925 if config.channel_handshake_config.announced_channel != announced_channel {
6926 return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
6930 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis, config);
6931 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
// Protocol-level safety check in place; this should never happen because of
// `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
6934 return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6936 if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
6937 return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg.push_msat)));
6939 if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6940 log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
6941 msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
6943 if holder_selected_channel_reserve_satoshis < msg.dust_limit_satoshis {
6944 return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
6947 // check if the funder's amount for the initial commitment tx is sufficient
6948 // for full fee payment plus a few HTLCs to ensure the channel will be useful.
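// Worked example (illustrative numbers only, assuming a non-anchor commitment base
// weight of 724 WU, 172 WU per HTLC, and MIN_AFFORDABLE_HTLC_COUNT = 4): at
// `feerate_per_kw = 2500`, the fee is 2500 * (724 + 4 * 172) / 1000 = 3530 sats,
// which the funder's balance must cover in addition to any anchor output value.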
6949 let anchor_outputs_value = if channel_type.supports_anchors_zero_fee_htlc_tx() {
6950 ANCHOR_OUTPUT_VALUE_SATOSHI * 2
6954 let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat;
6955 let commitment_tx_fee = commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000;
6956 if (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value) < commitment_tx_fee {
return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay the fee for the initial commitment transaction ({} sats).", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee)));
6960 let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee - anchor_outputs_value;
6961 // While it's reasonable for us to not meet the channel reserve initially (if they don't
6962 // want to push much to us), our counterparty should always have more than our reserve.
6963 if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
6964 return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
6967 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
6968 match &msg.shutdown_scriptpubkey {
6969 &Some(ref script) => {
// Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
6971 if script.len() == 0 {
6974 if !script::is_bolt2_compliant(&script, their_features) {
6975 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
6977 Some(script.clone())
// Peer is signaling upfront_shutdown but didn't opt out with the correct mechanism (a 0-length script). The peer looks buggy, so we fail the channel
return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we didn't receive a script. Use a 0-length script to opt out".to_owned()));
6987 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
6988 match signer_provider.get_shutdown_scriptpubkey() {
6989 Ok(scriptpubkey) => Some(scriptpubkey),
6990 Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
6994 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
6995 if !shutdown_scriptpubkey.is_compatible(&their_features) {
6996 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
7000 let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
7001 Ok(script) => script,
7002 Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
7005 let mut secp_ctx = Secp256k1::new();
7006 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
7008 let minimum_depth = if is_0conf {
7011 Some(cmp::max(config.channel_handshake_config.minimum_depth, 1))
7015 context: ChannelContext {
7018 config: LegacyChannelConfig {
7019 options: config.channel_config.clone(),
7021 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
7026 inbound_handshake_limits_override: None,
7028 temporary_channel_id: Some(msg.temporary_channel_id),
7029 channel_id: msg.temporary_channel_id,
7030 channel_state: ChannelState::NegotiatingFunding(
7031 NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
7033 announcement_sigs_state: AnnouncementSigsState::NotSent,
7036 latest_monitor_update_id: 0,
7038 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
7039 shutdown_scriptpubkey,
7042 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
7043 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
7044 value_to_self_msat: msg.push_msat,
7046 pending_inbound_htlcs: Vec::new(),
7047 pending_outbound_htlcs: Vec::new(),
7048 holding_cell_htlc_updates: Vec::new(),
7049 pending_update_fee: None,
7050 holding_cell_update_fee: None,
7051 next_holder_htlc_id: 0,
7052 next_counterparty_htlc_id: 0,
7053 update_time_counter: 1,
7055 resend_order: RAACommitmentOrder::CommitmentFirst,
7057 monitor_pending_channel_ready: false,
7058 monitor_pending_revoke_and_ack: false,
7059 monitor_pending_commitment_signed: false,
7060 monitor_pending_forwards: Vec::new(),
7061 monitor_pending_failures: Vec::new(),
7062 monitor_pending_finalized_fulfills: Vec::new(),
7064 signer_pending_commitment_update: false,
7065 signer_pending_funding: false,
7067 #[cfg(debug_assertions)]
7068 holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
7069 #[cfg(debug_assertions)]
7070 counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
7072 last_sent_closing_fee: None,
7073 pending_counterparty_closing_signed: None,
7074 expecting_peer_commitment_signed: false,
7075 closing_fee_limits: None,
7076 target_closing_feerate_sats_per_kw: None,
7078 funding_tx_confirmed_in: None,
7079 funding_tx_confirmation_height: 0,
7080 short_channel_id: None,
7081 channel_creation_height: current_chain_height,
7083 feerate_per_kw: msg.feerate_per_kw,
7084 channel_value_satoshis: msg.funding_satoshis,
7085 counterparty_dust_limit_satoshis: msg.dust_limit_satoshis,
7086 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
7087 counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000),
7088 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config),
7089 counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis),
7090 holder_selected_channel_reserve_satoshis,
7091 counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
7092 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
7093 counterparty_max_accepted_htlcs: msg.max_accepted_htlcs,
7094 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
7097 counterparty_forwarding_info: None,
7099 channel_transaction_parameters: ChannelTransactionParameters {
7100 holder_pubkeys: pubkeys,
7101 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
7102 is_outbound_from_holder: false,
7103 counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
7104 selected_contest_delay: msg.to_self_delay,
7105 pubkeys: counterparty_pubkeys,
7107 funding_outpoint: None,
7108 channel_type_features: channel_type.clone()
7110 funding_transaction: None,
7111 is_batch_funding: None,
7113 counterparty_cur_commitment_point: Some(msg.first_per_commitment_point),
7114 counterparty_prev_commitment_point: None,
7115 counterparty_node_id,
7117 counterparty_shutdown_scriptpubkey,
7119 commitment_secrets: CounterpartyCommitmentSecrets::new(),
7121 channel_update_status: ChannelUpdateStatus::Enabled,
7122 closing_signed_in_flight: false,
7124 announcement_sigs: None,
7126 #[cfg(any(test, fuzzing))]
7127 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
7128 #[cfg(any(test, fuzzing))]
7129 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
7131 workaround_lnd_bug_4006: None,
7132 sent_message_awaiting_response: None,
7134 latest_inbound_scid_alias: None,
7135 outbound_scid_alias: 0,
7137 channel_pending_event_emitted: false,
7138 channel_ready_event_emitted: false,
7140 #[cfg(any(test, fuzzing))]
7141 historical_inbound_htlc_fulfills: HashSet::new(),
7146 blocked_monitor_updates: Vec::new(),
7148 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
7154 /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
7155 /// should be sent back to the counterparty node.
7157 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7158 pub fn accept_inbound_channel(&mut self) -> msgs::AcceptChannel {
7159 if self.context.is_outbound() {
7160 panic!("Tried to send accept_channel for an outbound channel?");
7163 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
7164 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
7166 panic!("Tried to send accept_channel after channel had moved forward");
7168 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7169 panic!("Tried to send an accept_channel for a channel that has already advanced");
7172 self.generate_accept_channel_message()
7175 /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
7176 /// inbound channel. If the intention is to accept an inbound channel, use
7177 /// [`InboundV1Channel::accept_inbound_channel`] instead.
7179 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7180 fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
7181 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
7182 let keys = self.context.get_holder_pubkeys();
7184 msgs::AcceptChannel {
7185 temporary_channel_id: self.context.channel_id,
7186 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
7187 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
7188 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
7189 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
7190 minimum_depth: self.context.minimum_depth.unwrap(),
7191 to_self_delay: self.context.get_holder_selected_contest_delay(),
7192 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
7193 funding_pubkey: keys.funding_pubkey,
7194 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
7195 payment_point: keys.payment_point,
7196 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
7197 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
7198 first_per_commitment_point,
7199 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
7200 Some(script) => script.clone().into_inner(),
7201 None => Builder::new().into_script(),
7203 channel_type: Some(self.context.channel_type.clone()),
7205 next_local_nonce: None,
7209 /// Allows tests to extract a [`msgs::AcceptChannel`] message for an
7210 /// inbound channel without accepting it.
7212 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7214 pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel {
7215 self.generate_accept_channel_message()
7218 fn check_funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<CommitmentTransaction, ChannelError> where L::Target: Logger {
7219 let funding_script = self.context.get_funding_redeemscript();
7221 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
7222 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
7223 let trusted_tx = initial_commitment_tx.trust();
7224 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
7225 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
7226 // They sign the holder commitment transaction...
7227 log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
7228 log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
7229 encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
7230 encode::serialize_hex(&funding_script), &self.context.channel_id());
7231 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
7233 Ok(initial_commitment_tx)
7236 pub fn funding_created<L: Deref>(
7237 mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
7238 ) -> Result<(Channel<SP>, Option<msgs::FundingSigned>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (Self, ChannelError)>
7242 if self.context.is_outbound() {
7243 return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned())));
7246 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
7247 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
7249 // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
7250 // remember the channel, so it's safe to just send an error_message here and drop the channel.
7252 return Err((self, ChannelError::Close("Received funding_created after we got the channel!".to_owned())));
7254 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
7255 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
7256 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7257 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
7260 let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
7261 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
7262 // This is an externally observable change before we finish all our checks. In particular
7263 // check_funding_created_signature may fail.
7264 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
7266 let initial_commitment_tx = match self.check_funding_created_signature(&msg.signature, logger) {
7268 Err(ChannelError::Close(e)) => {
7269 self.context.channel_transaction_parameters.funding_outpoint = None;
7270 return Err((self, ChannelError::Close(e)));
7273 // The only error we know how to handle is ChannelError::Close, so we fall over here
7274 // to make sure we don't continue with an inconsistent state.
7275 panic!("unexpected error type from check_funding_created_signature {:?}", e);
7279 let holder_commitment_tx = HolderCommitmentTransaction::new(
7280 initial_commitment_tx,
7283 &self.context.get_holder_pubkeys().funding_pubkey,
7284 self.context.counterparty_funding_pubkey()
7287 if let Err(_) = self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new()) {
7288 return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
7291 // Now that we're past error-generating stuff, update our local state:
7293 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
7294 self.context.channel_id = funding_txo.to_channel_id();
7295 self.context.cur_counterparty_commitment_transaction_number -= 1;
7296 self.context.cur_holder_commitment_transaction_number -= 1;
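// Commitment transaction numbers count down from INITIAL_COMMITMENT_NUMBER (2^48 - 1);
// having exchanged the initial commitment transactions, both counters advance by
// decrementing.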
7298 let (counterparty_initial_commitment_tx, funding_signed) = self.context.get_funding_signed_msg(logger);
7300 let funding_redeemscript = self.context.get_funding_redeemscript();
7301 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
7302 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
7303 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
7304 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
7305 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
7306 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
7307 shutdown_script, self.context.get_holder_selected_contest_delay(),
7308 &self.context.destination_script, (funding_txo, funding_txo_script.clone()),
7309 &self.context.channel_transaction_parameters,
7310 funding_redeemscript.clone(), self.context.channel_value_satoshis,
7312 holder_commitment_tx, best_block, self.context.counterparty_node_id);
7313 channel_monitor.provide_initial_counterparty_commitment_tx(
7314 counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
7315 self.context.cur_counterparty_commitment_transaction_number + 1,
7316 self.context.counterparty_cur_commitment_point.unwrap(), self.context.feerate_per_kw,
7317 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
7318 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
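// Note the `+ 1` above: we already decremented the counterparty commitment number when
// promoting the channel state, so the initial commitment handed to the monitor is
// re-indexed to the pre-decrement value it was actually signed under.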
7320 log_info!(logger, "{} funding_signed for peer for channel {}",
7321 if funding_signed.is_some() { "Generated" } else { "Waiting for signature on" }, &self.context.channel_id());
7323 // Promote the channel to a full-fledged one now that we have updated the state and have a
7324 // `ChannelMonitor`.
7325 let mut channel = Channel {
7326 context: self.context,
7328 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
7329 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
7331 Ok((channel, funding_signed, channel_monitor))
7335 const SERIALIZATION_VERSION: u8 = 3;
7336 const MIN_SERIALIZATION_VERSION: u8 = 3;
7338 impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
7344 impl Writeable for ChannelUpdateStatus {
7345 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7346 // We only care about writing out the current state as it was announced, i.e. only either
7347 // Enabled or Disabled. In the case of DisabledStaged, we most recently announced the
7348 // channel as enabled, so we write 0. For EnabledStaged, we similarly write a 1.
7350 ChannelUpdateStatus::Enabled => 0u8.write(writer)?,
7351 ChannelUpdateStatus::DisabledStaged(_) => 0u8.write(writer)?,
7352 ChannelUpdateStatus::EnabledStaged(_) => 1u8.write(writer)?,
7353 ChannelUpdateStatus::Disabled => 1u8.write(writer)?,
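// Illustrative round-trip (a sketch of the mapping, not additional behavior):
// DisabledStaged is written as 0 here and read back below as Enabled, i.e.
// staged-but-unannounced transitions are intentionally forgotten across restarts.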
7359 impl Readable for ChannelUpdateStatus {
7360 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
7361 Ok(match <u8 as Readable>::read(reader)? {
7362 0 => ChannelUpdateStatus::Enabled,
7363 1 => ChannelUpdateStatus::Disabled,
7364 _ => return Err(DecodeError::InvalidValue),
7369 impl Writeable for AnnouncementSigsState {
7370 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7371 // We only care about writing out the current state as if we had just disconnected, at
7372 // which point we always set anything but PeerReceived to NotSent.
7374 AnnouncementSigsState::NotSent => 0u8.write(writer),
7375 AnnouncementSigsState::MessageSent => 0u8.write(writer),
7376 AnnouncementSigsState::Committed => 0u8.write(writer),
7377 AnnouncementSigsState::PeerReceived => 1u8.write(writer),
7382 impl Readable for AnnouncementSigsState {
7383 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
7384 Ok(match <u8 as Readable>::read(reader)? {
7385 0 => AnnouncementSigsState::NotSent,
7386 1 => AnnouncementSigsState::PeerReceived,
7387 _ => return Err(DecodeError::InvalidValue),
7392 impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
7393 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7394 // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been called.
7397 write_ver_prefix!(writer, MIN_SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
7399 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7400 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write
7401 // the low bytes now and the optional high bytes later.
7402 let user_id_low = self.context.user_id as u64;
7403 user_id_low.write(writer)?;
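// Worked example of the split (values illustrative only): for
// user_id = (7u128 << 64) | 42 we write user_id_low == 42 here, and later emit
// user_id_high_opt == Some(7) as an odd-typed TLV which old readers safely ignore.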
7405 // Version 1 deserializers expected to read parts of the config object here. Version 2
7406 // deserializers (0.0.99) now read config through TLVs, and as we now require them for
7407 // `minimum_depth` we simply write dummy values here.
7408 writer.write_all(&[0; 8])?;
7410 self.context.channel_id.write(writer)?;
7412 let mut channel_state = self.context.channel_state;
7413 if matches!(channel_state, ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)) {
7414 channel_state.set_peer_disconnected();
7416 channel_state.to_u32().write(writer)?;
7418 self.context.channel_value_satoshis.write(writer)?;
7420 self.context.latest_monitor_update_id.write(writer)?;
7422 // Write out the old serialization for shutdown_pubkey for backwards compatibility, if
7423 // deserialized from that format.
7424 match self.context.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) {
7425 Some(shutdown_pubkey) => shutdown_pubkey.write(writer)?,
7426 None => [0u8; PUBLIC_KEY_SIZE].write(writer)?,
7428 self.context.destination_script.write(writer)?;
7430 self.context.cur_holder_commitment_transaction_number.write(writer)?;
7431 self.context.cur_counterparty_commitment_transaction_number.write(writer)?;
7432 self.context.value_to_self_msat.write(writer)?;
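// Count the inbound HTLCs the peer announced but which we never committed to: since we
// serialize as if we had just disconnected, these are omitted (and skipped in the loop
// below), and the peer will re-announce them on reconnect.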
7434 let mut dropped_inbound_htlcs = 0;
7435 for htlc in self.context.pending_inbound_htlcs.iter() {
7436 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
7437 dropped_inbound_htlcs += 1;
7440 (self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?;
7441 for htlc in self.context.pending_inbound_htlcs.iter() {
7442 if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state {
7445 htlc.htlc_id.write(writer)?;
7446 htlc.amount_msat.write(writer)?;
7447 htlc.cltv_expiry.write(writer)?;
7448 htlc.payment_hash.write(writer)?;
7450 &InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
7451 &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_state) => {
7453 htlc_state.write(writer)?;
7455 &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_state) => {
7457 htlc_state.write(writer)?;
7459 &InboundHTLCState::Committed => {
7462 &InboundHTLCState::LocalRemoved(ref removal_reason) => {
7464 removal_reason.write(writer)?;
7469 let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];
7470 let mut pending_outbound_skimmed_fees: Vec<Option<u64>> = Vec::new();
7471 let mut pending_outbound_blinding_points: Vec<Option<PublicKey>> = Vec::new();
7473 (self.context.pending_outbound_htlcs.len() as u64).write(writer)?;
7474 for htlc in self.context.pending_outbound_htlcs.iter() {
7475 htlc.htlc_id.write(writer)?;
7476 htlc.amount_msat.write(writer)?;
7477 htlc.cltv_expiry.write(writer)?;
7478 htlc.payment_hash.write(writer)?;
7479 htlc.source.write(writer)?;
7481 &OutboundHTLCState::LocalAnnounced(ref onion_packet) => {
7483 onion_packet.write(writer)?;
7485 &OutboundHTLCState::Committed => {
7488 &OutboundHTLCState::RemoteRemoved(_) => {
7489 // Treat this as a Committed because we haven't received the CS - they'll
7490 // resend the claim/fail on reconnect, as will (hopefully) the missing CS.
7493 &OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref outcome) => {
7495 if let OutboundHTLCOutcome::Success(preimage) = outcome {
7496 preimages.push(preimage);
7498 let reason: Option<&HTLCFailReason> = outcome.into();
7499 reason.write(writer)?;
7501 &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) => {
7503 if let OutboundHTLCOutcome::Success(preimage) = outcome {
7504 preimages.push(preimage);
7506 let reason: Option<&HTLCFailReason> = outcome.into();
7507 reason.write(writer)?;
7510 pending_outbound_skimmed_fees.push(htlc.skimmed_fee_msat);
7511 pending_outbound_blinding_points.push(htlc.blinding_point);
7514 let mut holding_cell_skimmed_fees: Vec<Option<u64>> = Vec::new();
7515 let mut holding_cell_blinding_points: Vec<Option<PublicKey>> = Vec::new();
7516 // Vec of (htlc_id, failure_code, sha256_of_onion)
7517 let mut malformed_htlcs: Vec<(u64, u16, [u8; 32])> = Vec::new();
7518 (self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
7519 for update in self.context.holding_cell_htlc_updates.iter() {
7521 &HTLCUpdateAwaitingACK::AddHTLC {
7522 ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
7523 blinding_point, skimmed_fee_msat,
7526 amount_msat.write(writer)?;
7527 cltv_expiry.write(writer)?;
7528 payment_hash.write(writer)?;
7529 source.write(writer)?;
7530 onion_routing_packet.write(writer)?;
7532 holding_cell_skimmed_fees.push(skimmed_fee_msat);
7533 holding_cell_blinding_points.push(blinding_point);
7535 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
7537 payment_preimage.write(writer)?;
7538 htlc_id.write(writer)?;
7540 &HTLCUpdateAwaitingACK::FailHTLC { ref htlc_id, ref err_packet } => {
7542 htlc_id.write(writer)?;
7543 err_packet.write(writer)?;
7545 &HTLCUpdateAwaitingACK::FailMalformedHTLC {
7546 htlc_id, failure_code, sha256_of_onion
7548 // We don't want to break downgrading by adding a new variant, so write a dummy
7549 // `::FailHTLC` variant and write the real malformed error as an optional TLV.
7550 malformed_htlcs.push((htlc_id, failure_code, sha256_of_onion));
7552 let dummy_err_packet = msgs::OnionErrorPacket { data: Vec::new() };
7554 htlc_id.write(writer)?;
7555 dummy_err_packet.write(writer)?;
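// The real (htlc_id, failure_code, sha256_of_onion) tuple is written to the
// `malformed_htlcs` TLV below and re-attached to this dummy entry on read.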
7560 match self.context.resend_order {
7561 RAACommitmentOrder::CommitmentFirst => 0u8.write(writer)?,
7562 RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?,
7565 self.context.monitor_pending_channel_ready.write(writer)?;
7566 self.context.monitor_pending_revoke_and_ack.write(writer)?;
7567 self.context.monitor_pending_commitment_signed.write(writer)?;
7569 (self.context.monitor_pending_forwards.len() as u64).write(writer)?;
7570 for &(ref pending_forward, ref htlc_id) in self.context.monitor_pending_forwards.iter() {
7571 pending_forward.write(writer)?;
7572 htlc_id.write(writer)?;
7575 (self.context.monitor_pending_failures.len() as u64).write(writer)?;
7576 for &(ref htlc_source, ref payment_hash, ref fail_reason) in self.context.monitor_pending_failures.iter() {
7577 htlc_source.write(writer)?;
7578 payment_hash.write(writer)?;
7579 fail_reason.write(writer)?;
7582 if self.context.is_outbound() {
7583 self.context.pending_update_fee.map(|(a, _)| a).write(writer)?;
7584 } else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.context.pending_update_fee {
7585 Some(feerate).write(writer)?;
7587 // As for inbound HTLCs, if the update was only announced and never committed in a
7588 // commitment_signed, drop it.
7589 None::<u32>.write(writer)?;
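// On read, the fee-update state itself isn't serialized: it is reconstructed from
// `is_outbound_from_holder` (see the `pending_update_fee` handling in `read`).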
7591 self.context.holding_cell_update_fee.write(writer)?;
7593 self.context.next_holder_htlc_id.write(writer)?;
7594 (self.context.next_counterparty_htlc_id - dropped_inbound_htlcs).write(writer)?;
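// Roll back the ID counter to match: the counterparty will reuse the IDs of the
// dropped (uncommitted) HTLCs when it re-announces them after reconnecting.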
7595 self.context.update_time_counter.write(writer)?;
7596 self.context.feerate_per_kw.write(writer)?;
7598 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7599 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7600 // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7601 // consider the stale state on reload.
7604 self.context.funding_tx_confirmed_in.write(writer)?;
7605 self.context.funding_tx_confirmation_height.write(writer)?;
7606 self.context.short_channel_id.write(writer)?;
7608 self.context.counterparty_dust_limit_satoshis.write(writer)?;
7609 self.context.holder_dust_limit_satoshis.write(writer)?;
7610 self.context.counterparty_max_htlc_value_in_flight_msat.write(writer)?;
7612 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7613 self.context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0).write(writer)?;
7615 self.context.counterparty_htlc_minimum_msat.write(writer)?;
7616 self.context.holder_htlc_minimum_msat.write(writer)?;
7617 self.context.counterparty_max_accepted_htlcs.write(writer)?;
7619 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7620 self.context.minimum_depth.unwrap_or(0).write(writer)?;
7622 match &self.context.counterparty_forwarding_info {
7625 info.fee_base_msat.write(writer)?;
7626 info.fee_proportional_millionths.write(writer)?;
7627 info.cltv_expiry_delta.write(writer)?;
7629 None => 0u8.write(writer)?
7632 self.context.channel_transaction_parameters.write(writer)?;
7633 self.context.funding_transaction.write(writer)?;
7635 self.context.counterparty_cur_commitment_point.write(writer)?;
7636 self.context.counterparty_prev_commitment_point.write(writer)?;
7637 self.context.counterparty_node_id.write(writer)?;
7639 self.context.counterparty_shutdown_scriptpubkey.write(writer)?;
7641 self.context.commitment_secrets.write(writer)?;
7643 self.context.channel_update_status.write(writer)?;
7645 #[cfg(any(test, fuzzing))]
7646 (self.context.historical_inbound_htlc_fulfills.len() as u64).write(writer)?;
7647 #[cfg(any(test, fuzzing))]
7648 for htlc in self.context.historical_inbound_htlc_fulfills.iter() {
7649 htlc.write(writer)?;
7652 // If the channel type is something other than only-static-remote-key, then we need to have
7653 // older clients fail to deserialize this channel at all. If the type is
7654 // only-static-remote-key, we simply consider it "default" and don't write the channel type at all.
7656 let chan_type = if self.context.channel_type != ChannelTypeFeatures::only_static_remote_key() {
7657 Some(&self.context.channel_type) } else { None };
7659 // The same logic applies for `holder_selected_channel_reserve_satoshis` values other than
7660 // the default, and when `holder_max_htlc_value_in_flight_msat` is configured to be set to
7661 // a different percentage of the channel value than 10%, which older versions of LDK used
7662 // to set it to before the percentage was made configurable.
7663 let serialized_holder_selected_reserve =
7664 if self.context.holder_selected_channel_reserve_satoshis != get_legacy_default_holder_selected_channel_reserve_satoshis(self.context.channel_value_satoshis)
7665 { Some(self.context.holder_selected_channel_reserve_satoshis) } else { None };
7667 let mut old_max_in_flight_percent_config = UserConfig::default().channel_handshake_config;
7668 old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = MAX_IN_FLIGHT_PERCENT_LEGACY;
7669 let serialized_holder_htlc_max_in_flight =
7670 if self.context.holder_max_htlc_value_in_flight_msat != get_holder_max_htlc_value_in_flight_msat(self.context.channel_value_satoshis, &old_max_in_flight_percent_config)
7671 { Some(self.context.holder_max_htlc_value_in_flight_msat) } else { None };
7673 let channel_pending_event_emitted = Some(self.context.channel_pending_event_emitted);
7674 let channel_ready_event_emitted = Some(self.context.channel_ready_event_emitted);
7676 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7677 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. Therefore,
7678 // we write the high bytes as an option here.
7679 let user_id_high_opt = Some((self.context.user_id >> 64) as u64);
7681 let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.context.holder_max_accepted_htlcs) };
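// Reminder of the TLV parity convention relied on below (a sketch of the rule, not new
// behavior): even-typed entries must be understood by a reader or deserialization
// fails, while odd-typed entries may be safely ignored by older versions.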
7683 write_tlv_fields!(writer, {
7684 (0, self.context.announcement_sigs, option),
7685 // minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
7686 // default value instead of being Option<>al. Thus, to maintain compatibility we write
7687 // them twice, once with their original default values above, and once as an option
7688 // here. On the read side, old versions will simply ignore the odd-type entries here,
7689 // and new versions map the default values to None and allow the TLV entries here to override them.
7691 (1, self.context.minimum_depth, option),
7692 (2, chan_type, option),
7693 (3, self.context.counterparty_selected_channel_reserve_satoshis, option),
7694 (4, serialized_holder_selected_reserve, option),
7695 (5, self.context.config, required),
7696 (6, serialized_holder_htlc_max_in_flight, option),
7697 (7, self.context.shutdown_scriptpubkey, option),
7698 (8, self.context.blocked_monitor_updates, optional_vec),
7699 (9, self.context.target_closing_feerate_sats_per_kw, option),
7700 (11, self.context.monitor_pending_finalized_fulfills, required_vec),
7701 (13, self.context.channel_creation_height, required),
7702 (15, preimages, required_vec),
7703 (17, self.context.announcement_sigs_state, required),
7704 (19, self.context.latest_inbound_scid_alias, option),
7705 (21, self.context.outbound_scid_alias, required),
7706 (23, channel_ready_event_emitted, option),
7707 (25, user_id_high_opt, option),
7708 (27, self.context.channel_keys_id, required),
7709 (28, holder_max_accepted_htlcs, option),
7710 (29, self.context.temporary_channel_id, option),
7711 (31, channel_pending_event_emitted, option),
7712 (35, pending_outbound_skimmed_fees, optional_vec),
7713 (37, holding_cell_skimmed_fees, optional_vec),
7714 (38, self.context.is_batch_funding, option),
7715 (39, pending_outbound_blinding_points, optional_vec),
7716 (41, holding_cell_blinding_points, optional_vec),
7717 (43, malformed_htlcs, optional_vec), // Added in 0.0.119
7724 const MAX_ALLOC_SIZE: usize = 64*1024;
7725 impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel<SP>
7727 ES::Target: EntropySource,
7728 SP::Target: SignerProvider
7730 fn read<R : io::Read>(reader: &mut R, args: (&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)) -> Result<Self, DecodeError> {
7731 let (entropy_source, signer_provider, serialized_height, our_supported_features) = args;
7732 let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
7734 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7735 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We read
7736 // the low bytes now and the high bytes later.
7737 let user_id_low: u64 = Readable::read(reader)?;
7739 let mut config = Some(LegacyChannelConfig::default());
7741 // Read the old serialization of the ChannelConfig from version 0.0.98.
7742 config.as_mut().unwrap().options.forwarding_fee_proportional_millionths = Readable::read(reader)?;
7743 config.as_mut().unwrap().options.cltv_expiry_delta = Readable::read(reader)?;
7744 config.as_mut().unwrap().announced_channel = Readable::read(reader)?;
7745 config.as_mut().unwrap().commit_upfront_shutdown_pubkey = Readable::read(reader)?;
7747 // Read the 8 bytes of backwards-compatibility ChannelConfig data.
7748 let mut _val: u64 = Readable::read(reader)?;
7751 let channel_id = Readable::read(reader)?;
7752 let channel_state = ChannelState::from_u32(Readable::read(reader)?).map_err(|_| DecodeError::InvalidValue)?;
7753 let channel_value_satoshis = Readable::read(reader)?;
7755 let latest_monitor_update_id = Readable::read(reader)?;
7757 let mut keys_data = None;
7759 // Read the serialized signer bytes. We'll choose to deserialize them or not based on whether
7760 // the `channel_keys_id` TLV is present below.
7761 let keys_len: u32 = Readable::read(reader)?;
7762 keys_data = Some(Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE)));
7763 while keys_data.as_ref().unwrap().len() != keys_len as usize {
7764 // Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
7765 let mut data = [0; 1024];
7766 let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.as_ref().unwrap().len())];
7767 reader.read_exact(read_slice)?;
7768 keys_data.as_mut().unwrap().extend_from_slice(read_slice);
7772 // Read the old serialization for shutdown_pubkey, preferring the TLV field later if set.
7773 let mut shutdown_scriptpubkey = match <PublicKey as Readable>::read(reader) {
7774 Ok(pubkey) => Some(ShutdownScript::new_p2wpkh_from_pubkey(pubkey)),
7777 let destination_script = Readable::read(reader)?;
7779 let cur_holder_commitment_transaction_number = Readable::read(reader)?;
7780 let cur_counterparty_commitment_transaction_number = Readable::read(reader)?;
7781 let value_to_self_msat = Readable::read(reader)?;
7783 let pending_inbound_htlc_count: u64 = Readable::read(reader)?;
7785 let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
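// The capacity is clamped so that a corrupt count can't trigger a huge up-front
// allocation; if the count is genuinely large, the Vec simply grows as we push.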
7786 for _ in 0..pending_inbound_htlc_count {
7787 pending_inbound_htlcs.push(InboundHTLCOutput {
7788 htlc_id: Readable::read(reader)?,
7789 amount_msat: Readable::read(reader)?,
7790 cltv_expiry: Readable::read(reader)?,
7791 payment_hash: Readable::read(reader)?,
7792 state: match <u8 as Readable>::read(reader)? {
7793 1 => InboundHTLCState::AwaitingRemoteRevokeToAnnounce(Readable::read(reader)?),
7794 2 => InboundHTLCState::AwaitingAnnouncedRemoteRevoke(Readable::read(reader)?),
7795 3 => InboundHTLCState::Committed,
7796 4 => InboundHTLCState::LocalRemoved(Readable::read(reader)?),
7797 _ => return Err(DecodeError::InvalidValue),
7802 let pending_outbound_htlc_count: u64 = Readable::read(reader)?;
7803 let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7804 for _ in 0..pending_outbound_htlc_count {
7805 pending_outbound_htlcs.push(OutboundHTLCOutput {
7806 htlc_id: Readable::read(reader)?,
7807 amount_msat: Readable::read(reader)?,
7808 cltv_expiry: Readable::read(reader)?,
7809 payment_hash: Readable::read(reader)?,
7810 source: Readable::read(reader)?,
7811 state: match <u8 as Readable>::read(reader)? {
7812 0 => OutboundHTLCState::LocalAnnounced(Box::new(Readable::read(reader)?)),
7813 1 => OutboundHTLCState::Committed,
7815 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7816 OutboundHTLCState::RemoteRemoved(option.into())
7819 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7820 OutboundHTLCState::AwaitingRemoteRevokeToRemove(option.into())
7823 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7824 OutboundHTLCState::AwaitingRemovedRemoteRevoke(option.into())
7826 _ => return Err(DecodeError::InvalidValue),
7828 skimmed_fee_msat: None,
7829 blinding_point: None,
7833 let holding_cell_htlc_update_count: u64 = Readable::read(reader)?;
7834 let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, DEFAULT_MAX_HTLCS as usize*2));
7835 for _ in 0..holding_cell_htlc_update_count {
7836 holding_cell_htlc_updates.push(match <u8 as Readable>::read(reader)? {
7837 0 => HTLCUpdateAwaitingACK::AddHTLC {
7838 amount_msat: Readable::read(reader)?,
7839 cltv_expiry: Readable::read(reader)?,
7840 payment_hash: Readable::read(reader)?,
7841 source: Readable::read(reader)?,
7842 onion_routing_packet: Readable::read(reader)?,
7843 skimmed_fee_msat: None,
7844 blinding_point: None,
7846 1 => HTLCUpdateAwaitingACK::ClaimHTLC {
7847 payment_preimage: Readable::read(reader)?,
7848 htlc_id: Readable::read(reader)?,
7850 2 => HTLCUpdateAwaitingACK::FailHTLC {
7851 htlc_id: Readable::read(reader)?,
7852 err_packet: Readable::read(reader)?,
7854 _ => return Err(DecodeError::InvalidValue),
7858 let resend_order = match <u8 as Readable>::read(reader)? {
7859 0 => RAACommitmentOrder::CommitmentFirst,
7860 1 => RAACommitmentOrder::RevokeAndACKFirst,
7861 _ => return Err(DecodeError::InvalidValue),
7864 let monitor_pending_channel_ready = Readable::read(reader)?;
7865 let monitor_pending_revoke_and_ack = Readable::read(reader)?;
7866 let monitor_pending_commitment_signed = Readable::read(reader)?;
7868 let monitor_pending_forwards_count: u64 = Readable::read(reader)?;
7869 let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, DEFAULT_MAX_HTLCS as usize));
7870 for _ in 0..monitor_pending_forwards_count {
7871 monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?));
7874 let monitor_pending_failures_count: u64 = Readable::read(reader)?;
7875 let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, DEFAULT_MAX_HTLCS as usize));
7876 for _ in 0..monitor_pending_failures_count {
7877 monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?));
7880 let pending_update_fee_value: Option<u32> = Readable::read(reader)?;
7882 let holding_cell_update_fee = Readable::read(reader)?;
7884 let next_holder_htlc_id = Readable::read(reader)?;
7885 let next_counterparty_htlc_id = Readable::read(reader)?;
7886 let update_time_counter = Readable::read(reader)?;
7887 let feerate_per_kw = Readable::read(reader)?;
7889 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7890 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7891 // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7892 // consider the stale state on reload.
7893 match <u8 as Readable>::read(reader)? {
7896 let _: u32 = Readable::read(reader)?;
7897 let _: u64 = Readable::read(reader)?;
7898 let _: Signature = Readable::read(reader)?;
7900 _ => return Err(DecodeError::InvalidValue),
7903 let funding_tx_confirmed_in = Readable::read(reader)?;
7904 let funding_tx_confirmation_height = Readable::read(reader)?;
7905 let short_channel_id = Readable::read(reader)?;
7907 let counterparty_dust_limit_satoshis = Readable::read(reader)?;
7908 let holder_dust_limit_satoshis = Readable::read(reader)?;
7909 let counterparty_max_htlc_value_in_flight_msat = Readable::read(reader)?;
7910 let mut counterparty_selected_channel_reserve_satoshis = None;
7912 // Read the old serialization from version 0.0.98.
7913 counterparty_selected_channel_reserve_satoshis = Some(Readable::read(reader)?);
7915 // Read the 8 bytes of backwards-compatibility data.
7916 let _dummy: u64 = Readable::read(reader)?;
7918 let counterparty_htlc_minimum_msat = Readable::read(reader)?;
7919 let holder_htlc_minimum_msat = Readable::read(reader)?;
7920 let counterparty_max_accepted_htlcs = Readable::read(reader)?;
7922 let mut minimum_depth = None;
7924 // Read the old serialization from version 0.0.98.
7925 minimum_depth = Some(Readable::read(reader)?);
7927 // Read the 4 bytes of backwards-compatibility data.
7928 let _dummy: u32 = Readable::read(reader)?;
7931 let counterparty_forwarding_info = match <u8 as Readable>::read(reader)? {
7933 1 => Some(CounterpartyForwardingInfo {
7934 fee_base_msat: Readable::read(reader)?,
7935 fee_proportional_millionths: Readable::read(reader)?,
7936 cltv_expiry_delta: Readable::read(reader)?,
7938 _ => return Err(DecodeError::InvalidValue),
7941 let mut channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
7942 let funding_transaction: Option<Transaction> = Readable::read(reader)?;
7944 let counterparty_cur_commitment_point = Readable::read(reader)?;
7946 let counterparty_prev_commitment_point = Readable::read(reader)?;
7947 let counterparty_node_id = Readable::read(reader)?;
7949 let counterparty_shutdown_scriptpubkey = Readable::read(reader)?;
7950 let commitment_secrets = Readable::read(reader)?;
7952 let channel_update_status = Readable::read(reader)?;
7954 #[cfg(any(test, fuzzing))]
7955 let mut historical_inbound_htlc_fulfills = HashSet::new();
7956 #[cfg(any(test, fuzzing))]
7958 let htlc_fulfills_len: u64 = Readable::read(reader)?;
7959 for _ in 0..htlc_fulfills_len {
7960 assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?));
7964 let pending_update_fee = if let Some(feerate) = pending_update_fee_value {
7965 Some((feerate, if channel_parameters.is_outbound_from_holder {
7966 FeeUpdateState::Outbound
7968 FeeUpdateState::AwaitingRemoteRevokeToAnnounce
7974 let mut announcement_sigs = None;
7975 let mut target_closing_feerate_sats_per_kw = None;
7976 let mut monitor_pending_finalized_fulfills = Some(Vec::new());
7977 let mut holder_selected_channel_reserve_satoshis = Some(get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis));
7978 let mut holder_max_htlc_value_in_flight_msat = Some(get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().channel_handshake_config));
7979 // Prior to supporting channel type negotiation, all of our channels were static_remotekey
7980 // only, so we default to that if none was written.
7981 let mut channel_type = Some(ChannelTypeFeatures::only_static_remote_key());
7982 let mut channel_creation_height = Some(serialized_height);
7983 let mut preimages_opt: Option<Vec<Option<PaymentPreimage>>> = None;
7985 // If we read an old Channel, for simplicity we just treat it as "we never sent an
7986 // AnnouncementSignatures" which implies we'll re-send it on reconnect, but that's fine.
7987 let mut announcement_sigs_state = Some(AnnouncementSigsState::NotSent);
7988 let mut latest_inbound_scid_alias = None;
7989 let mut outbound_scid_alias = None;
7990 let mut channel_pending_event_emitted = None;
7991 let mut channel_ready_event_emitted = None;
7993 let mut user_id_high_opt: Option<u64> = None;
7994 let mut channel_keys_id: Option<[u8; 32]> = None;
7995 let mut temporary_channel_id: Option<ChannelId> = None;
7996 let mut holder_max_accepted_htlcs: Option<u16> = None;
7998 let mut blocked_monitor_updates = Some(Vec::new());
8000 let mut pending_outbound_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
8001 let mut holding_cell_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
8003 let mut is_batch_funding: Option<()> = None;
8005 let mut pending_outbound_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
8006 let mut holding_cell_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
8008 let mut malformed_htlcs: Option<Vec<(u64, u16, [u8; 32])>> = None;
8010 read_tlv_fields!(reader, {
8011 (0, announcement_sigs, option),
8012 (1, minimum_depth, option),
8013 (2, channel_type, option),
8014 (3, counterparty_selected_channel_reserve_satoshis, option),
8015 (4, holder_selected_channel_reserve_satoshis, option),
8016 (5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
8017 (6, holder_max_htlc_value_in_flight_msat, option),
8018 (7, shutdown_scriptpubkey, option),
8019 (8, blocked_monitor_updates, optional_vec),
8020 (9, target_closing_feerate_sats_per_kw, option),
8021 (11, monitor_pending_finalized_fulfills, optional_vec),
8022 (13, channel_creation_height, option),
8023 (15, preimages_opt, optional_vec),
8024 (17, announcement_sigs_state, option),
8025 (19, latest_inbound_scid_alias, option),
8026 (21, outbound_scid_alias, option),
8027 (23, channel_ready_event_emitted, option),
8028 (25, user_id_high_opt, option),
8029 (27, channel_keys_id, option),
8030 (28, holder_max_accepted_htlcs, option),
8031 (29, temporary_channel_id, option),
8032 (31, channel_pending_event_emitted, option),
8033 (35, pending_outbound_skimmed_fees_opt, optional_vec),
8034 (37, holding_cell_skimmed_fees_opt, optional_vec),
8035 (38, is_batch_funding, option),
8036 (39, pending_outbound_blinding_points_opt, optional_vec),
8037 (41, holding_cell_blinding_points_opt, optional_vec),
8038 (43, malformed_htlcs, optional_vec), // Added in 0.0.119
8041 let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
8042 let mut holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
8043 // If we've gotten to the funding stage of the channel, populate the signer with its
8044 // required channel parameters.
8045 if channel_state >= ChannelState::FundingNegotiated {
8046 holder_signer.provide_channel_parameters(&channel_parameters);
8048 (channel_keys_id, holder_signer)
8050 // `keys_data` can be `None` if we had corrupted data.
8051 let keys_data = keys_data.ok_or(DecodeError::InvalidValue)?;
8052 let holder_signer = signer_provider.read_chan_signer(&keys_data)?;
8053 (holder_signer.channel_keys_id(), holder_signer)
8056 if let Some(preimages) = preimages_opt {
8057 let mut iter = preimages.into_iter();
8058 for htlc in pending_outbound_htlcs.iter_mut() {
8060 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(None)) => {
8061 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
8063 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(None)) => {
8064 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
8069 // We expect all preimages to be consumed above
8070 if iter.next().is_some() {
8071 return Err(DecodeError::InvalidValue);
8075 let chan_features = channel_type.as_ref().unwrap();
8076 if !chan_features.is_subset(our_supported_features) {
8077 // If the channel was written by a new version and negotiated with features we don't
8078 // understand yet, refuse to read it.
8079 return Err(DecodeError::UnknownRequiredFeature);
8082 // ChannelTransactionParameters may have had an empty features set upon deserialization.
8083 // To account for that, we're proactively setting/overriding the field here.
8084 channel_parameters.channel_type_features = chan_features.clone();
8086 let mut secp_ctx = Secp256k1::new();
8087 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
8089 // `user_id` used to be a single u64 value. In order to remain backwards
8090 // compatible with versions prior to 0.0.113, the u128 is serialized as two
8091 // separate u64 values.
8092 let user_id = user_id_low as u128 + ((user_id_high_opt.unwrap_or(0) as u128) << 64);
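// Continuing the example from `write` (illustrative only): user_id_low == 42 and
// user_id_high_opt == Some(7) recombine to (7u128 << 64) | 42 here.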
8094 let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);
8096 if let Some(skimmed_fees) = pending_outbound_skimmed_fees_opt {
8097 let mut iter = skimmed_fees.into_iter();
8098 for htlc in pending_outbound_htlcs.iter_mut() {
8099 htlc.skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
8101 // We expect all skimmed fees to be consumed above
8102 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8104 if let Some(skimmed_fees) = holding_cell_skimmed_fees_opt {
8105 let mut iter = skimmed_fees.into_iter();
8106 for htlc in holding_cell_htlc_updates.iter_mut() {
8107 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut skimmed_fee_msat, .. } = htlc {
8108 *skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
8111 // We expect all skimmed fees to be consumed above
8112 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8114 if let Some(blinding_pts) = pending_outbound_blinding_points_opt {
8115 let mut iter = blinding_pts.into_iter();
8116 for htlc in pending_outbound_htlcs.iter_mut() {
8117 htlc.blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
8119 // We expect all blinding points to be consumed above
8120 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8122 if let Some(blinding_pts) = holding_cell_blinding_points_opt {
8123 let mut iter = blinding_pts.into_iter();
8124 for htlc in holding_cell_htlc_updates.iter_mut() {
8125 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut blinding_point, .. } = htlc {
8126 *blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
8129 // We expect all blinding points to be consumed above
8130 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8133 if let Some(malformed_htlcs) = malformed_htlcs {
8134 for (malformed_htlc_id, failure_code, sha256_of_onion) in malformed_htlcs {
8135 let htlc_idx = holding_cell_htlc_updates.iter().position(|htlc| {
8136 if let HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet } = htlc {
8137 let matches = *htlc_id == malformed_htlc_id;
8138 if matches { debug_assert!(err_packet.data.is_empty()) }
8141 }).ok_or(DecodeError::InvalidValue)?;
8142 let malformed_htlc = HTLCUpdateAwaitingACK::FailMalformedHTLC {
8143 htlc_id: malformed_htlc_id, failure_code, sha256_of_onion
8145 let _ = core::mem::replace(&mut holding_cell_htlc_updates[htlc_idx], malformed_htlc);
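// The dummy `FailHTLC` written for downgrade compatibility has now been swapped back
// in place for the original `FailMalformedHTLC` update.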
8150 context: ChannelContext {
8153 config: config.unwrap(),
8157 // Note that we don't care about serializing handshake limits as we only ever serialize
8158 // channel data after the handshake has completed.
8159 inbound_handshake_limits_override: None,
8162 temporary_channel_id,
8164 announcement_sigs_state: announcement_sigs_state.unwrap(),
8166 channel_value_satoshis,
8168 latest_monitor_update_id,
8170 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
8171 shutdown_scriptpubkey,
8174 cur_holder_commitment_transaction_number,
8175 cur_counterparty_commitment_transaction_number,
8178 holder_max_accepted_htlcs,
8179 pending_inbound_htlcs,
8180 pending_outbound_htlcs,
8181 holding_cell_htlc_updates,
8185 monitor_pending_channel_ready,
8186 monitor_pending_revoke_and_ack,
8187 monitor_pending_commitment_signed,
8188 monitor_pending_forwards,
8189 monitor_pending_failures,
8190 monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
8192 signer_pending_commitment_update: false,
8193 signer_pending_funding: false,
8196 holding_cell_update_fee,
8197 next_holder_htlc_id,
8198 next_counterparty_htlc_id,
8199 update_time_counter,
8202 #[cfg(debug_assertions)]
8203 holder_max_commitment_tx_output: Mutex::new((0, 0)),
8204 #[cfg(debug_assertions)]
8205 counterparty_max_commitment_tx_output: Mutex::new((0, 0)),
8207 last_sent_closing_fee: None,
8208 pending_counterparty_closing_signed: None,
8209 expecting_peer_commitment_signed: false,
8210 closing_fee_limits: None,
8211 target_closing_feerate_sats_per_kw,
8213 funding_tx_confirmed_in,
8214 funding_tx_confirmation_height,
8216 channel_creation_height: channel_creation_height.unwrap(),
8218 counterparty_dust_limit_satoshis,
8219 holder_dust_limit_satoshis,
8220 counterparty_max_htlc_value_in_flight_msat,
8221 holder_max_htlc_value_in_flight_msat: holder_max_htlc_value_in_flight_msat.unwrap(),
8222 counterparty_selected_channel_reserve_satoshis,
8223 holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis.unwrap(),
8224 counterparty_htlc_minimum_msat,
8225 holder_htlc_minimum_msat,
8226 counterparty_max_accepted_htlcs,
8229 counterparty_forwarding_info,
8231 channel_transaction_parameters: channel_parameters,
8232 funding_transaction,
8235 counterparty_cur_commitment_point,
8236 counterparty_prev_commitment_point,
8237 counterparty_node_id,
8239 counterparty_shutdown_scriptpubkey,
8243 channel_update_status,
8244 closing_signed_in_flight: false,
8248 #[cfg(any(test, fuzzing))]
8249 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
8250 #[cfg(any(test, fuzzing))]
8251 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
8253 workaround_lnd_bug_4006: None,
8254 sent_message_awaiting_response: None,
8256 latest_inbound_scid_alias,
8257 // Later in the ChannelManager deserialization phase we scan for channels and assign scid aliases if they're missing
8258 outbound_scid_alias: outbound_scid_alias.unwrap_or(0),
8260 channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true),
8261 channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true),
8263 #[cfg(any(test, fuzzing))]
8264 historical_inbound_htlc_fulfills,
8266 channel_type: channel_type.unwrap(),
8269 blocked_monitor_updates: blocked_monitor_updates.unwrap(),
8278 use bitcoin::blockdata::constants::ChainHash;
8279 use bitcoin::blockdata::script::{ScriptBuf, Builder};
8280 use bitcoin::blockdata::transaction::{Transaction, TxOut};
8281 use bitcoin::blockdata::opcodes;
8282 use bitcoin::network::constants::Network;
8283 use crate::ln::onion_utils::INVALID_ONION_BLINDING;
8284 use crate::ln::{PaymentHash, PaymentPreimage};
8285 use crate::ln::channel_keys::{RevocationKey, RevocationBasepoint};
8286 use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
8287 use crate::ln::channel::InitFeatures;
8288 use crate::ln::channel::{AwaitingChannelReadyFlags, Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, HTLCUpdateAwaitingACK, commit_tx_fee_msat};
8289 use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
8290 use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
8291 use crate::ln::msgs;
8292 use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
8293 use crate::ln::script::ShutdownScript;
8294 use crate::ln::chan_utils::{self, htlc_success_tx_weight, htlc_timeout_tx_weight};
8295 use crate::chain::BestBlock;
8296 use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
8297 use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
8298 use crate::chain::transaction::OutPoint;
8299 use crate::routing::router::{Path, RouteHop};
8300 use crate::util::config::UserConfig;
8301 use crate::util::errors::APIError;
8302 use crate::util::ser::{ReadableArgs, Writeable};
8303 use crate::util::test_utils;
8304 use crate::util::test_utils::{OnGetShutdownScriptpubkey, TestKeysInterface};
8305 use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
8306 use bitcoin::secp256k1::ffi::Signature as FFISignature;
8307 use bitcoin::secp256k1::{SecretKey,PublicKey};
8308 use bitcoin::hashes::sha256::Hash as Sha256;
8309 use bitcoin::hashes::Hash;
8310 use bitcoin::hashes::hex::FromHex;
8311 use bitcoin::hash_types::WPubkeyHash;
8312 use bitcoin::blockdata::locktime::absolute::LockTime;
8313 use bitcoin::address::{WitnessProgram, WitnessVersion};
8314 use crate::prelude::*;
8316 struct TestFeeEstimator {
8319 impl FeeEstimator for TestFeeEstimator {
8320 fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 {
8326 fn test_max_funding_satoshis_no_wumbo() {
8327 assert_eq!(TOTAL_BITCOIN_SUPPLY_SATOSHIS, 21_000_000 * 100_000_000);
8328 assert!(MAX_FUNDING_SATOSHIS_NO_WUMBO <= TOTAL_BITCOIN_SUPPLY_SATOSHIS,
8329 "MAX_FUNDING_SATOSHIS_NO_WUMBO is greater than all satoshis in existence");
8333 signer: InMemorySigner,
8336 impl EntropySource for Keys {
8337 fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }
8340 impl SignerProvider for Keys {
8341 type EcdsaSigner = InMemorySigner;
8343 type TaprootSigner = InMemorySigner;
8345 fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
8346 self.signer.channel_keys_id()
8349 fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::EcdsaSigner {
8353 fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::EcdsaSigner, DecodeError> { panic!(); }
8355 fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()> {
8356 let secp_ctx = Secp256k1::signing_only();
8357 let channel_monitor_claim_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
8358 let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
8359 Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(channel_monitor_claim_key_hash).into_script())
8362 fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
8363 let secp_ctx = Secp256k1::signing_only();
8364 let channel_close_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
8365 Ok(ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key)))
8369 #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
8370 fn public_from_secret_hex(secp_ctx: &Secp256k1<bitcoin::secp256k1::All>, hex: &str) -> PublicKey {
8371 PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&<Vec<u8>>::from_hex(hex).unwrap()[..]).unwrap())
8375 fn upfront_shutdown_script_incompatibility() {
8376 let features = channelmanager::provided_init_features(&UserConfig::default()).clear_shutdown_anysegwit();
8377 let non_v0_segwit_shutdown_script = ShutdownScript::new_witness_program(
8378 &WitnessProgram::new(WitnessVersion::V16, &[0, 40]).unwrap(),
8381 let seed = [42; 32];
8382 let network = Network::Testnet;
8383 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8384 keys_provider.expect(OnGetShutdownScriptpubkey {
8385 returns: non_v0_segwit_shutdown_script.clone(),
8388 let secp_ctx = Secp256k1::new();
8389 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8390 let config = UserConfig::default();
8391 match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42, None) {
8392 Err(APIError::IncompatibleShutdownScript { script }) => {
8393 assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
8395 Err(e) => panic!("Unexpected error: {:?}", e),
8396 Ok(_) => panic!("Expected error"),
8400 // Check that, during channel creation, we use the same feerate in the open channel message
8401 // as we do in the Channel object creation itself.
8403 fn test_open_channel_msg_fee() {
8404 let original_fee = 253;
8405 let mut fee_est = TestFeeEstimator{fee_est: original_fee };
8406 let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
8407 let secp_ctx = Secp256k1::new();
8408 let seed = [42; 32];
8409 let network = Network::Testnet;
8410 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8412 let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8413 let config = UserConfig::default();
8414 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8416 // Now change the fee so we can check that the fee in the open_channel message is the
8417 // same as the old fee.
8418 fee_est.fee_est = 500;
8419 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8420 assert_eq!(open_channel_msg.feerate_per_kw, original_fee);

	#[test]
	fn test_holder_vs_counterparty_dust_limit() {
		// Test that when calculating the local and remote commitment transaction fees, the correct
		// dust limits are used.
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
		let logger = test_utils::TestLogger::new();
		let best_block = BestBlock::from_network(network);

		// Go through the flow of opening a channel between two nodes, making sure
		// they have different dust limits.

		// Create Node A's channel pointing to Node B's pubkey
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

		// Create Node B's channel by receiving Node A's open_channel message
		// Make sure A's dust limit is as we expect.
		let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();

		// Node B --> Node A: accept channel, explicitly setting B's dust limit.
		let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
		accept_channel_msg.dust_limit_satoshis = 546;
		node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
		node_a_chan.context.holder_dust_limit_satoshis = 1560;

		// Node A --> Node B: funding created
		let output_script = node_a_chan.context.get_funding_redeemscript();
		let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
			value: 10000000, script_pubkey: output_script.clone(),
		}]};
		let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
		let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
		let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();

		// Node B --> Node A: funding signed
		let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
		let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };

		// Put some inbound and outbound HTLCs in A's channel.
		let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
		node_a_chan.context.pending_inbound_htlcs.push(InboundHTLCOutput {
			htlc_id: 0,
			amount_msat: htlc_amount_msat,
			payment_hash: PaymentHash(Sha256::hash(&[42; 32]).to_byte_array()),
			cltv_expiry: 300000000,
			state: InboundHTLCState::Committed,
		});

		node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
			htlc_id: 1,
			amount_msat: htlc_amount_msat, // put an amount below A's dust amount but above B's.
			payment_hash: PaymentHash(Sha256::hash(&[43; 32]).to_byte_array()),
			cltv_expiry: 200000000,
			state: OutboundHTLCState::Committed,
			source: HTLCSource::OutboundRoute {
				path: Path { hops: Vec::new(), blinded_tail: None },
				session_priv: SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
				first_hop_htlc_msat: 548,
				payment_id: PaymentId([42; 32]),
			},
			skimmed_fee_msat: None,
			blinding_point: None,
		});

		// Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass
		// the dust limit check.
		let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
		let local_commit_tx_fee = node_a_chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
		let local_commit_fee_0_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.get_channel_type());
		assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs);

		// Finally, make sure that when Node A calculates the remote's commitment transaction fees, all
		// of the HTLCs are seen to be above the dust limit.
		node_a_chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
		let remote_commit_fee_3_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.get_channel_type());
		let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
		let remote_commit_tx_fee = node_a_chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
		assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs);
	}
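
	// A sketch of the fee arithmetic behind `commit_tx_fee_msat` as used above. The
	// constants are the BOLT 3 non-anchor commitment weights; this is illustrative only
	// and is not a replacement for the real helper, which also accounts for the channel
	// type.
	#[test]
	fn sketch_commit_tx_fee_scales_with_nondust_htlcs() {
		const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
		const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
		fn sketch_fee_msat(feerate_per_kw: u64, num_htlcs: u64) -> u64 {
			// The feerate is per kiloweight; round down to whole sats, then express in msat.
			feerate_per_kw * (COMMITMENT_TX_BASE_WEIGHT + num_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 * 1000
		}
		// Each non-dust HTLC adds one output's weight worth of fee; dust HTLCs (as in the
		// test above) add nothing because they never become commitment outputs.
		assert!(sketch_fee_msat(15000, 3) > sketch_fee_msat(15000, 0));
		assert_eq!(sketch_fee_msat(15000, 0), 15000 * 724 / 1000 * 1000);
	}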

	#[test]
	fn test_timeout_vs_success_htlc_dust_limit() {
		// Make sure that when `next_remote_commit_tx_fee_msat` and `next_local_commit_tx_fee_msat`
		// calculate the real dust limits for HTLCs (i.e. the dust limit given by the counterparty
		// *plus* the fees paid for the HTLC) they don't swap `HTLC_SUCCESS_TX_WEIGHT` for
		// `HTLC_TIMEOUT_TX_WEIGHT`, and vice versa.
		let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 253 });
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

		let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

		let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.get_channel_type());
		let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.get_channel_type());

		// If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped: then this HTLC would be
		// counted as dust when it shouldn't be.
		let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000;
		let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
		let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
		assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);

		// If swapped: this HTLC would be counted as non-dust when it shouldn't be.
		let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000;
		let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered);
		let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
		assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);

		chan.context.channel_transaction_parameters.is_outbound_from_holder = false;

		// If swapped: this HTLC would be counted as non-dust when it shouldn't be.
		let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000;
		let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
		let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
		assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);

		// If swapped: this HTLC would be counted as dust when it shouldn't be.
		let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000;
		let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered);
		let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
		assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
	}
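
	// A sketch of the "real" dust threshold the test above is probing (illustrative only;
	// the weights are the BOLT 3 non-anchor HTLC-timeout/HTLC-success claim-tx weights).
	// An offered HTLC on our commitment is dust if its value minus the fee of the
	// HTLC-timeout tx that would claim it falls below our dust limit; a received HTLC is
	// measured against the HTLC-success tx fee instead, so the two weights must never be
	// swapped.
	#[test]
	fn sketch_htlc_dust_thresholds() {
		const HTLC_TIMEOUT_TX_WEIGHT: u64 = 663;
		const HTLC_SUCCESS_TX_WEIGHT: u64 = 703;
		let feerate_per_kw: u64 = 253;
		let dust_limit_satoshis: u64 = 354;
		let timeout_threshold_sat = feerate_per_kw * HTLC_TIMEOUT_TX_WEIGHT / 1000 + dust_limit_satoshis;
		let success_threshold_sat = feerate_per_kw * HTLC_SUCCESS_TX_WEIGHT / 1000 + dust_limit_satoshis;
		// The success threshold is strictly higher, so swapping the two weights would
		// misclassify any HTLC whose value lies between the two thresholds.
		assert!(success_threshold_sat > timeout_threshold_sat);
	}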

	#[test]
	fn channel_reestablish_no_updates() {
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let logger = test_utils::TestLogger::new();
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let best_block = BestBlock::from_network(network);
		let chain_hash = ChainHash::using_genesis_block(network);
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

		// Go through the flow of opening a channel between two nodes.

		// Create Node A's channel pointing to Node B's pubkey
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

		// Create Node B's channel by receiving Node A's open_channel message
		let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();

		// Node B --> Node A: accept channel
		let accept_channel_msg = node_b_chan.accept_inbound_channel();
		node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();

		// Node A --> Node B: funding created
		let output_script = node_a_chan.context.get_funding_redeemscript();
		let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
			value: 10000000, script_pubkey: output_script.clone(),
		}]};
		let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
		let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
		let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();

		// Node B --> Node A: funding signed
		let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
		let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };

		// Now disconnect the two nodes and check that the commitment point in
		// Node B's channel_reestablish message is sane.
		assert!(node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
		let msg = node_b_chan.get_channel_reestablish(&&logger);
		assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
		assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
		assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);

		// Check that the commitment point in Node A's channel_reestablish message
		// is sane.
		assert!(node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
		let msg = node_a_chan.get_channel_reestablish(&&logger);
		assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
		assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
		assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
	}

	#[test]
	fn test_configured_holder_max_htlc_value_in_flight() {
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let logger = test_utils::TestLogger::new();
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
		let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());

		let mut config_2_percent = UserConfig::default();
		config_2_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 2;
		let mut config_99_percent = UserConfig::default();
		config_99_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 99;
		let mut config_0_percent = UserConfig::default();
		config_0_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 0;
		let mut config_101_percent = UserConfig::default();
		config_101_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 101;

		// Test that `OutboundV1Channel::new` creates a channel with the correct value for
		// `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
		// which is set to the lower bound + 1 (2%) of the `channel_value`.
		let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42, None).unwrap();
		let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000;
		assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64);

		// Test with the upper bound - 1 of valid values (99%).
		let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42, None).unwrap();
		let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000;
		assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);

		let chan_1_open_channel_msg = chan_1.get_open_channel(ChainHash::using_genesis_block(network));

		// Test that `InboundV1Channel::new` creates a channel with the correct value for
		// `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
		// which is set to the lower bound + 1 (2%) of the `channel_value`.
		let chan_3 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
		let chan_3_value_msat = chan_3.context.channel_value_satoshis * 1000;
		assert_eq!(chan_3.context.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64);

		// Test with the upper bound - 1 of valid values (99%).
		let chan_4 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
		let chan_4_value_msat = chan_4.context.channel_value_satoshis * 1000;
		assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64);

		// Test that `OutboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
		// if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
		let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42, None).unwrap();
		let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000;
		assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64);

		// Test that `OutboundV1Channel::new` uses the upper bound of the configurable percentage values
		// (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
		// than 100.
		let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42, None).unwrap();
		let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000;
		assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);

		// Test that `InboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
		// if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
		let chan_7 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
		let chan_7_value_msat = chan_7.context.channel_value_satoshis * 1000;
		assert_eq!(chan_7.context.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64);

		// Test that `InboundV1Channel::new` uses the upper bound of the configurable percentage values
		// (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
		// than 100.
		let chan_8 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
		let chan_8_value_msat = chan_8.context.channel_value_satoshis * 1000;
		assert_eq!(chan_8.context.holder_max_htlc_value_in_flight_msat, chan_8_value_msat);
	}
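
	// A sketch (not LDK's private helper) of the clamping rule verified above: the
	// configured percentage is clamped to the [1, 100] range before being applied to the
	// channel value.
	#[test]
	fn sketch_max_in_flight_percent_clamping() {
		fn sketch_max_in_flight_msat(channel_value_msat: u64, configured_percent: u8) -> u64 {
			let percent = configured_percent.clamp(1, 100) as u64;
			channel_value_msat * percent / 100
		}
		let value_msat = 10_000_000_000;
		assert_eq!(sketch_max_in_flight_msat(value_msat, 2), value_msat / 50);
		// Out-of-range configs snap to the nearest bound rather than failing.
		assert_eq!(sketch_max_in_flight_msat(value_msat, 0), value_msat / 100);
		assert_eq!(sketch_max_in_flight_msat(value_msat, 101), value_msat);
	}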

	#[test]
	fn test_configured_holder_selected_channel_reserve_satoshis() {

		// Test that `OutboundV1Channel::new` and `InboundV1Channel::new` create a channel with the correct
		// channel reserves, when `their_channel_reserve_proportional_millionths` is configured.
		test_self_and_counterparty_channel_reserve(10_000_000, 0.02, 0.02);

		// Test with valid but unreasonably high channel reserves
		// Requesting and accepting parties have requested for 49%-49% and 60%-30% channel reserve
		test_self_and_counterparty_channel_reserve(10_000_000, 0.49, 0.49);
		test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.30);

		// Test with calculated channel reserve less than lower bound
		// i.e. `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
		test_self_and_counterparty_channel_reserve(100_000, 0.00002, 0.30);

		// Test with invalid channel reserves since sum of both is greater than or equal
		// to channel_value
		test_self_and_counterparty_channel_reserve(10_000_000, 0.50, 0.50);
		test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.50);
	}
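
	// A sketch of the reserve selection checked by the helper below (illustrative; the
	// 1000-sat floor mirrors `MIN_THEIR_CHAN_RESERVE_SATOSHIS` in this file): each side
	// asks the other to keep `their_channel_reserve_proportional_millionths` of the
	// channel value, floored at 1000 sats, and the handshake must fail when the two
	// reserves would sum to the whole channel value or more.
	#[test]
	fn sketch_channel_reserve_selection() {
		fn sketch_reserve_sat(channel_value_sat: u64, proportional_millionths: u32) -> u64 {
			core::cmp::max(1000, channel_value_sat * proportional_millionths as u64 / 1_000_000)
		}
		let value_sat = 10_000_000;
		assert_eq!(sketch_reserve_sat(value_sat, 20_000), 200_000); // 2%
		assert_eq!(sketch_reserve_sat(100_000, 20), 1000); // a tiny request hits the floor
		// 50% + 50% leaves nothing spendable, so such a handshake is rejected.
		let (ours, theirs) = (sketch_reserve_sat(value_sat, 500_000), sketch_reserve_sat(value_sat, 500_000));
		assert!(ours + theirs >= value_sat);
	}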

	fn test_self_and_counterparty_channel_reserve(channel_value_satoshis: u64, outbound_selected_channel_reserve_perc: f64, inbound_selected_channel_reserve_perc: f64) {
		let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15_000 });
		let logger = test_utils::TestLogger::new();
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
		let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());

		let mut outbound_node_config = UserConfig::default();
		outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
		let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42, None).unwrap();

		let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
		assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);

		let chan_open_channel_msg = chan.get_open_channel(ChainHash::using_genesis_block(network));
		let mut inbound_node_config = UserConfig::default();
		inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;

		if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 {
			let chan_inbound_node = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false).unwrap();

			let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64);

			assert_eq!(chan_inbound_node.context.holder_selected_channel_reserve_satoshis, expected_inbound_selected_chan_reserve);
			assert_eq!(chan_inbound_node.context.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve);
		} else {
			// Channel negotiations failed
			let result = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false);
			assert!(result.is_err());
		}
	}

	#[test]
	fn channel_update() {
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let logger = test_utils::TestLogger::new();
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let best_block = BestBlock::from_network(network);
		let chain_hash = ChainHash::using_genesis_block(network);
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

		// Create Node A's channel pointing to Node B's pubkey
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

		// Create Node B's channel by receiving Node A's open_channel message
		// Make sure A's dust limit is as we expect.
		let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();

		// Node B --> Node A: accept channel, explicitly setting B's dust limit.
		let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
		accept_channel_msg.dust_limit_satoshis = 546;
		node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
		node_a_chan.context.holder_dust_limit_satoshis = 1560;

		// Node A --> Node B: funding created
		let output_script = node_a_chan.context.get_funding_redeemscript();
		let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
			value: 10000000, script_pubkey: output_script.clone(),
		}]};
		let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
		let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
		let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();

		// Node B --> Node A: funding signed
		let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
		let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };

		// Make sure that receiving a channel update will update the Channel as expected.
		let update = ChannelUpdate {
			contents: UnsignedChannelUpdate {
				chain_hash,
				short_channel_id: 0,
				timestamp: 0,
				flags: 0,
				cltv_expiry_delta: 100,
				htlc_minimum_msat: 5,
				htlc_maximum_msat: MAX_VALUE_MSAT,
				fee_base_msat: 110,
				fee_proportional_millionths: 11,
				excess_data: Vec::new(),
			},
			signature: Signature::from(unsafe { FFISignature::new() })
		};
		assert!(node_a_chan.channel_update(&update).unwrap());

		// The counterparty can send an update with a higher minimum HTLC, but that shouldn't
		// change our official htlc_minimum_msat.
		assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1);
		match node_a_chan.context.counterparty_forwarding_info() {
			Some(info) => {
				assert_eq!(info.cltv_expiry_delta, 100);
				assert_eq!(info.fee_base_msat, 110);
				assert_eq!(info.fee_proportional_millionths, 11);
			},
			None => panic!("expected counterparty forwarding info to be Some")
		}

		assert!(!node_a_chan.channel_update(&update).unwrap());
	}
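
	// A minimal sketch (toy types; not LDK's `channel_update` API) of the contract relied
	// on above: applying an update reports whether anything changed, so applying the same
	// update a second time must report `false`.
	#[test]
	fn sketch_second_identical_update_is_a_no_op() {
		#[derive(Clone, PartialEq)]
		struct SketchForwardingInfo { cltv_expiry_delta: u16 }
		fn apply(current: &mut Option<SketchForwardingInfo>, update: &SketchForwardingInfo) -> bool {
			let changed = current.as_ref() != Some(update);
			*current = Some(update.clone());
			changed
		}
		let mut info = None;
		let update = SketchForwardingInfo { cltv_expiry_delta: 100 };
		assert!(apply(&mut info, &update));
		assert!(!apply(&mut info, &update));
	}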

	#[test]
	fn blinding_point_skimmed_fee_malformed_ser() {
		// Ensure that channel blinding points, skimmed fees, and malformed HTLCs are (de)serialized
		// properly.
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let features = channelmanager::provided_init_features(&config);
		let outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None).unwrap();
		let mut chan = Channel { context: outbound_chan.context };

		let dummy_htlc_source = HTLCSource::OutboundRoute {
			path: Path {
				hops: vec![RouteHop {
					pubkey: test_utils::pubkey(2), channel_features: ChannelFeatures::empty(),
					node_features: NodeFeatures::empty(), short_channel_id: 0, fee_msat: 0,
					cltv_expiry_delta: 0, maybe_announced_channel: false,
				}],
				blinded_tail: None
			},
			session_priv: test_utils::privkey(42),
			first_hop_htlc_msat: 0,
			payment_id: PaymentId([42; 32]),
		};
		let dummy_outbound_output = OutboundHTLCOutput {
			htlc_id: 0,
			amount_msat: 0,
			payment_hash: PaymentHash([43; 32]),
			cltv_expiry: 0,
			state: OutboundHTLCState::Committed,
			source: dummy_htlc_source.clone(),
			skimmed_fee_msat: None,
			blinding_point: None,
		};
		let mut pending_outbound_htlcs = vec![dummy_outbound_output.clone(); 10];
		for (idx, htlc) in pending_outbound_htlcs.iter_mut().enumerate() {
			if idx % 2 == 0 {
				htlc.blinding_point = Some(test_utils::pubkey(42 + idx as u8));
			}
			if idx % 3 == 0 {
				htlc.skimmed_fee_msat = Some(1);
			}
		}
		chan.context.pending_outbound_htlcs = pending_outbound_htlcs.clone();

		let dummy_holding_cell_add_htlc = HTLCUpdateAwaitingACK::AddHTLC {
			amount_msat: 0,
			cltv_expiry: 0,
			payment_hash: PaymentHash([43; 32]),
			source: dummy_htlc_source.clone(),
			onion_routing_packet: msgs::OnionPacket {
				version: 0,
				public_key: Ok(test_utils::pubkey(1)),
				hop_data: [0; 20*65],
				hmac: [0; 32],
			},
			skimmed_fee_msat: None,
			blinding_point: None,
		};
		let dummy_holding_cell_claim_htlc = HTLCUpdateAwaitingACK::ClaimHTLC {
			payment_preimage: PaymentPreimage([42; 32]),
			htlc_id: 0,
		};
		let dummy_holding_cell_failed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailHTLC {
			htlc_id, err_packet: msgs::OnionErrorPacket { data: vec![42] }
		};
		let dummy_holding_cell_malformed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailMalformedHTLC {
			htlc_id, failure_code: INVALID_ONION_BLINDING, sha256_of_onion: [0; 32],
		};
		let mut holding_cell_htlc_updates = Vec::with_capacity(12);
		for i in 0..12 {
			if i % 5 == 0 {
				holding_cell_htlc_updates.push(dummy_holding_cell_add_htlc.clone());
			} else if i % 5 == 1 {
				holding_cell_htlc_updates.push(dummy_holding_cell_claim_htlc.clone());
			} else if i % 5 == 2 {
				let mut dummy_add = dummy_holding_cell_add_htlc.clone();
				if let HTLCUpdateAwaitingACK::AddHTLC {
					ref mut blinding_point, ref mut skimmed_fee_msat, ..
				} = &mut dummy_add {
					*blinding_point = Some(test_utils::pubkey(42 + i));
					*skimmed_fee_msat = Some(42);
				} else { panic!() }
				holding_cell_htlc_updates.push(dummy_add);
			} else if i % 5 == 3 {
				holding_cell_htlc_updates.push(dummy_holding_cell_malformed_htlc(i as u64));
			} else {
				holding_cell_htlc_updates.push(dummy_holding_cell_failed_htlc(i as u64));
			}
		}
		chan.context.holding_cell_htlc_updates = holding_cell_htlc_updates.clone();

		// Encode and decode the channel and ensure that the HTLCs within are the same.
		let encoded_chan = chan.encode();
		let mut s = crate::io::Cursor::new(&encoded_chan);
		let mut reader = crate::util::ser::FixedLengthReader::new(&mut s, encoded_chan.len() as u64);
		let features = channelmanager::provided_channel_type_features(&config);
		let decoded_chan = Channel::read(&mut reader, (&&keys_provider, &&keys_provider, 0, &features)).unwrap();
		assert_eq!(decoded_chan.context.pending_outbound_htlcs, pending_outbound_htlcs);
		assert_eq!(decoded_chan.context.holding_cell_htlc_updates, holding_cell_htlc_updates);
	}

	#[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
	#[test]
	fn outbound_commitment_test() {
		use bitcoin::sighash;
		use bitcoin::consensus::encode::serialize;
		use bitcoin::sighash::EcdsaSighashType;
		use bitcoin::hashes::hex::FromHex;
		use bitcoin::hash_types::Txid;
		use bitcoin::secp256k1::Message;
		use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, ecdsa::EcdsaChannelSigner};
		use crate::ln::PaymentPreimage;
		use crate::ln::channel::{HTLCOutputInCommitment ,TxCreationKeys};
		use crate::ln::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint};
		use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
		use crate::util::logger::Logger;
		use crate::sync::Arc;
		use core::str::FromStr;
		use hex::DisplayHex;

		// Test vectors from BOLT 3 Appendices C and F (anchors):
		let feeest = TestFeeEstimator{fee_est: 15000};
		let logger : Arc<dyn Logger> = Arc::new(test_utils::TestLogger::new());
		let secp_ctx = Secp256k1::new();

		let mut signer = InMemorySigner::new(
			&secp_ctx,
			SecretKey::from_slice(&<Vec<u8>>::from_hex("30ff4956bbdd3222d44cc5e8a1261dab1e07957bdac5ae88fe3261ef321f3749").unwrap()[..]).unwrap(),
			SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
			SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
			SecretKey::from_slice(&<Vec<u8>>::from_hex("3333333333333333333333333333333333333333333333333333333333333333").unwrap()[..]).unwrap(),
			SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),

			// These aren't set in the test vectors:
			[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff],
			10_000_000,
			[0; 32],
			[0; 32],
		);

		assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..],
				<Vec<u8>>::from_hex("023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb").unwrap()[..]);
		let keys_provider = Keys { signer: signer.clone() };

		let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let mut config = UserConfig::default();
		config.channel_handshake_config.announced_channel = false;
		let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42, None).unwrap(); // Nothing uses their network key in this test
		chan.context.holder_dust_limit_satoshis = 546;
		chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in in accept_channel

		let funding_info = OutPoint{ txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };

		let counterparty_pubkeys = ChannelPublicKeys {
			funding_pubkey: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
			revocation_basepoint: RevocationBasepoint::from(PublicKey::from_slice(&<Vec<u8>>::from_hex("02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27").unwrap()[..]).unwrap()),
			payment_point: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"),
			delayed_payment_basepoint: DelayedPaymentBasepoint::from(public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13")),
			htlc_basepoint: HtlcBasepoint::from(public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"))
		};
		chan.context.channel_transaction_parameters.counterparty_parameters = Some(
			CounterpartyChannelTransactionParameters {
				pubkeys: counterparty_pubkeys.clone(),
				selected_contest_delay: 144
			});
		chan.context.channel_transaction_parameters.funding_outpoint = Some(funding_info);
		signer.provide_channel_parameters(&chan.context.channel_transaction_parameters);

		assert_eq!(counterparty_pubkeys.payment_point.serialize()[..],
				<Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);

		assert_eq!(counterparty_pubkeys.funding_pubkey.serialize()[..],
				<Vec<u8>>::from_hex("030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c1").unwrap()[..]);

		assert_eq!(counterparty_pubkeys.htlc_basepoint.to_public_key().serialize()[..],
				<Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);

		// We can't just use build_holder_transaction_keys here as the per_commitment_secret is not
		// derived from a commitment_seed, so instead we copy it here and call
		// build_commitment_transaction.
		let delayed_payment_base = &chan.context.holder_signer.as_ref().pubkeys().delayed_payment_basepoint;
		let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
		let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
		let htlc_basepoint = &chan.context.holder_signer.as_ref().pubkeys().htlc_basepoint;
		let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint);
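		// For reference: `TxCreationKeys::derive_new` applies BOLT 3 key derivation,
		// combining the per-commitment point with the broadcaster's delayed-payment and
		// HTLC basepoints and the countersignatory's revocation and HTLC basepoints to
		// produce the per-commitment key set the vectors below are verified against.
		// (Comment added for orientation; see `chan_utils` for the actual derivation.)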

		macro_rules! test_commitment {
			( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
				chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::only_static_remote_key();
				test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::only_static_remote_key(), $($remain)*);
			};
		}

		macro_rules! test_commitment_with_anchors {
			( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
				chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
				test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), $($remain)*);
			};
		}

		macro_rules! test_commitment_common {
			( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $opt_anchors: expr, {
				$( { $htlc_idx: expr, $counterparty_htlc_sig_hex: expr, $htlc_sig_hex: expr, $htlc_tx_hex: expr } ), *
			} ) => { {
				let (commitment_tx, htlcs): (_, Vec<HTLCOutputInCommitment>) = {
					let mut commitment_stats = chan.context.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, &logger);

					let htlcs = commitment_stats.htlcs_included.drain(..)
						.filter_map(|(htlc, _)| if htlc.transaction_output_index.is_some() { Some(htlc) } else { None })
						.collect();
					(commitment_stats.tx, htlcs)
				};
				let trusted_tx = commitment_tx.trust();
				let unsigned_tx = trusted_tx.built_transaction();
				let redeemscript = chan.context.get_funding_redeemscript();
				let counterparty_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_sig_hex).unwrap()[..]).unwrap();
				let sighash = unsigned_tx.get_sighash_all(&redeemscript, chan.context.channel_value_satoshis);
				log_trace!(logger, "unsigned_tx = {}", serialize(&unsigned_tx.transaction).as_hex());
				assert!(secp_ctx.verify_ecdsa(&sighash, &counterparty_signature, chan.context.counterparty_funding_pubkey()).is_ok(), "verify counterparty commitment sig");

				let mut per_htlc: Vec<(HTLCOutputInCommitment, Option<Signature>)> = Vec::new();
				per_htlc.clear(); // Don't warn about excess mut for no-HTLC calls
				let mut counterparty_htlc_sigs = Vec::new();
				counterparty_htlc_sigs.clear(); // Don't warn about excess mut for no-HTLC calls
				$({
					let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
					per_htlc.push((htlcs[$htlc_idx].clone(), Some(remote_signature)));
					counterparty_htlc_sigs.push(remote_signature);
				})*
				assert_eq!(htlcs.len(), per_htlc.len());

				let holder_commitment_tx = HolderCommitmentTransaction::new(
					commitment_tx.clone(),
					counterparty_signature,
					counterparty_htlc_sigs,
					&chan.context.holder_signer.as_ref().pubkeys().funding_pubkey,
					chan.context.counterparty_funding_pubkey()
				);
				let holder_sig = signer.sign_holder_commitment(&holder_commitment_tx, &secp_ctx).unwrap();
				assert_eq!(Signature::from_der(&<Vec<u8>>::from_hex($sig_hex).unwrap()[..]).unwrap(), holder_sig, "holder_sig");

				let funding_redeemscript = chan.context.get_funding_redeemscript();
				let tx = holder_commitment_tx.add_holder_sig(&funding_redeemscript, holder_sig);
				assert_eq!(serialize(&tx)[..], <Vec<u8>>::from_hex($tx_hex).unwrap()[..], "tx");

				// ((htlc, counterparty_sig), (index, holder_sig))
				let mut htlc_counterparty_sig_iter = holder_commitment_tx.counterparty_htlc_sigs.iter();

				$({
					log_trace!(logger, "verifying htlc {}", $htlc_idx);
					let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();

					let ref htlc = htlcs[$htlc_idx];
					let mut htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw,
						chan.context.get_counterparty_selected_contest_delay().unwrap(),
						&htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
					let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
					let htlc_sighashtype = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
					let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
					assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key.to_public_key()).is_ok(), "verify counterparty htlc sig");

					let mut preimage: Option<PaymentPreimage> = None;
					if !htlc.offered {
						for i in 0..5 {
							let out = PaymentHash(Sha256::hash(&[i; 32]).to_byte_array());
							if out == htlc.payment_hash {
								preimage = Some(PaymentPreimage([i; 32]));
							}
						}

						assert!(preimage.is_some());
					}

					let htlc_counterparty_sig = htlc_counterparty_sig_iter.next().unwrap();
					let htlc_holder_sig = signer.sign_holder_htlc_transaction(&htlc_tx, 0, &HTLCDescriptor {
						channel_derivation_parameters: ChannelDerivationParameters {
							value_satoshis: chan.context.channel_value_satoshis,
							keys_id: chan.context.channel_keys_id,
							transaction_parameters: chan.context.channel_transaction_parameters.clone(),
						},
						commitment_txid: trusted_tx.txid(),
						per_commitment_number: trusted_tx.commitment_number(),
						per_commitment_point: trusted_tx.per_commitment_point(),
						feerate_per_kw: trusted_tx.feerate_per_kw(),
						htlc: htlc.clone(),
						preimage: preimage.clone(),
						counterparty_sig: *htlc_counterparty_sig,
					}, &secp_ctx).unwrap();
					let num_anchors = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { 2 } else { 0 };
					assert_eq!(htlc.transaction_output_index, Some($htlc_idx + num_anchors), "output index");

					let signature = Signature::from_der(&<Vec<u8>>::from_hex($htlc_sig_hex).unwrap()[..]).unwrap();
					assert_eq!(signature, htlc_holder_sig, "htlc sig");
					let trusted_tx = holder_commitment_tx.trust();
					htlc_tx.input[0].witness = trusted_tx.build_htlc_input_witness($htlc_idx, htlc_counterparty_sig, &htlc_holder_sig, &preimage);
					log_trace!(logger, "htlc_tx = {}", serialize(&htlc_tx).as_hex());
					assert_eq!(serialize(&htlc_tx)[..], <Vec<u8>>::from_hex($htlc_tx_hex).unwrap()[..], "htlc tx");
				})*
				assert!(htlc_counterparty_sig_iter.next().is_none());
			} }
		}

		// anchors: simple commitment tx with no HTLCs and single anchor
		test_commitment_with_anchors!("30440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a8658",
						 "3044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778",
						 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f10529800000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778014730440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a865801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});

		// simple commitment tx with no HTLCs
		chan.context.value_to_self_msat = 7000000000;

		test_commitment!("3045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b0",
						 "30440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae055647142",
						 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48454a56a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae05564714201483045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});

		// anchors: simple commitment tx with no HTLCs
		test_commitment_with_anchors!("3045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f3",
						 "30450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f7",
						 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a508b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f701483045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});

		chan.context.pending_inbound_htlcs.push({
			let mut out = InboundHTLCOutput{
				htlc_id: 0,
				amount_msat: 1000000,
				cltv_expiry: 500,
				payment_hash: PaymentHash([0; 32]),
				state: InboundHTLCState::Committed,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap()).to_byte_array();
			out
		});
		chan.context.pending_inbound_htlcs.push({
			let mut out = InboundHTLCOutput{
				htlc_id: 1,
				amount_msat: 2000000,
				cltv_expiry: 501,
				payment_hash: PaymentHash([0; 32]),
				state: InboundHTLCState::Committed,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
			out
		});
		chan.context.pending_outbound_htlcs.push({
			let mut out = OutboundHTLCOutput{
				htlc_id: 2,
				amount_msat: 2000000,
				cltv_expiry: 502,
				payment_hash: PaymentHash([0; 32]),
				state: OutboundHTLCState::Committed,
				source: HTLCSource::dummy(),
				skimmed_fee_msat: None,
				blinding_point: None,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).to_byte_array();
			out
		});
		chan.context.pending_outbound_htlcs.push({
			let mut out = OutboundHTLCOutput{
				htlc_id: 3,
				amount_msat: 3000000,
				cltv_expiry: 503,
				payment_hash: PaymentHash([0; 32]),
				state: OutboundHTLCState::Committed,
				source: HTLCSource::dummy(),
				skimmed_fee_msat: None,
				blinding_point: None,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).to_byte_array();
			out
		});
		chan.context.pending_inbound_htlcs.push({
			let mut out = InboundHTLCOutput{
				htlc_id: 4,
				amount_msat: 4000000,
				cltv_expiry: 504,
				payment_hash: PaymentHash([0; 32]),
				state: InboundHTLCState::Committed,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0404040404040404040404040404040404040404040404040404040404040404").unwrap()).to_byte_array();
			out
		});

		// commitment tx with all five HTLCs untrimmed (minimum feerate)
		chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
		chan.context.feerate_per_kw = 0;

		test_commitment!("3044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e5",
						 "304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea",
						 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e0a06a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea01473044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

						  { 0,
						  "3045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b",
						  "30440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce",
						  "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b00000000000000000001e8030000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b014730440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },

						  { 1,
						  "30440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f896004",
						  "3045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f",
						  "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b01000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f89600401483045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },

						  { 2,
						  "30440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d43352",
						  "3045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa",
						  "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b02000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d4335201483045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },

						  { 3,
						  "304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c1363",
						  "304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac7487",
						  "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b03000000000000000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c13630147304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac748701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

						  { 4,
						  "3044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b87",
						  "3045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95",
						  "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b04000000000000000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b8701483045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
		} );

		// commitment tx with seven outputs untrimmed (maximum feerate)
		chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
		chan.context.feerate_per_kw = 647;

		test_commitment!("3045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee",
						 "30450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb7",
						 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e09c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb701483045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

						  { 0,
						  "30450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f",
						  "30440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b",
						  "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f014730440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },

						  { 1,
						  "304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c",
						  "30450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f",
						  "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0100000000000000000124060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c014830450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },

						  { 2,
						  "30440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c8",
						  "3045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673",
						  "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe020000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c801483045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },

						  { 3,
						  "304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c64",
						  "3045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee",
						  "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe030000000000000000010c0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c6401483045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

						  { 4,
						  "30450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca",
						  "3044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9",
						  "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe04000000000000000001da0d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca01473044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
// commitment tx with six outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 648;
9278 test_commitment!("304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e5",
9279 "3045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e9",
9280 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4844e9d6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e90147304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9283 "3045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e0",
9284 "304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec6",
9285 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10000000000000000000123060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e00148304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9288 "304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d384124",
9289 "3044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae",
9290 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10100000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d38412401473044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9293 "304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc625532989",
9294 "3045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e2260796",
9295 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf1020000000000000000010b0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc62553298901483045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e226079601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9298 "3044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be",
9299 "3045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6",
9300 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf103000000000000000001d90d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be01483045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
// anchors: commitment tx with six outputs untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 645;
chan.context.holder_dust_limit_satoshis = 1001;
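// With anchors_zero_htlc_fee_and_dependencies, second-stage HTLC txs pay no fee, so
// trimming compares the HTLC amount directly against the dust limit; the anchor
// vectors therefore vary holder_dust_limit_satoshis rather than the feerate.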
9308 test_commitment_with_anchors!("3044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc312",
9309 "3045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d0051",
9310 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80084a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837ead007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5eb80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994abc996a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d005101473044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc31201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9313 "3045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc282",
9314 "3045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef09",
9315 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320002000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc28283483045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef0901008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000" },
9318 "3045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e",
9319 "3045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac",
9320 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320003000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e83483045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
9323 "3044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c1",
9324 "304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e",
9325 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320004000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c18347304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
9328 "3044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc615",
9329 "3045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226",
9330 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320005000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc61583483045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
// commitment tx with six outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2069;
chan.context.holder_dust_limit_satoshis = 546;
9338 test_commitment!("304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc",
9339 "3045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e33",
9340 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48477956a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e330148304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9343 "3045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f699",
9344 "3045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d",
9345 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0000000000000000000175020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f69901483045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9348 "3045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df",
9349 "3045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61",
9350 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0100000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df01483045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9353 "3045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e0",
9354 "3044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c18",
9355 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d020000000000000000015d060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e001473044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c1801008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9358 "304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df",
9359 "3045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33",
9360 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d03000000000000000001f2090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df01483045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
// commitment tx with five outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2070;
9367 test_commitment!("304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c",
9368 "3044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c1",
9369 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484da966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c10147304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9372 "304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b",
9373 "30440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e5",
9374 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff0000000000000000000174020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b014730440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e501008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9377 "3045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546",
9378 "30450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c6",
9379 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff010000000000000000015c060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546014830450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9382 "3045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504",
9383 "30440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502",
9384 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff02000000000000000001f1090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504014730440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
// commitment tx with five outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2194;
9391 test_commitment!("304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb3",
9392 "3044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d398",
9393 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48440966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d3980147304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9396 "3045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de8450",
9397 "304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e0154301",
9398 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de84500148304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e015430101008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9401 "3044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a0",
9402 "3045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b77",
9403 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd010000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a001483045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b7701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9406 "3045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b",
9407 "30450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3",
9408 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd020000000000000000019a090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b014830450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
// commitment tx with four outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2195;
9415 test_commitment!("304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce403",
9416 "3044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d1767",
9417 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484b8976a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d17670147304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce40301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9420 "3045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e",
9421 "3045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d76",
9422 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900000000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e01483045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d7601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9425 "3045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a",
9426 "3044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82",
9427 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900100000000000000000199090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a01473044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
// anchors: commitment tx with four outputs untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2185;
chan.context.holder_dust_limit_satoshis = 2001;
// ChannelTypeFeatures is not Copy, so it must be cloned before being overridden.
let cached_channel_type = chan.context.channel_type.clone();
chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
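// The pre-anchors channel type is cached here so that the later non-anchor vectors
// (e.g. the 3702 sat/kw case below) can restore it after each anchors variant runs.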
9437 test_commitment_with_anchors!("3044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c0",
9438 "3045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd5",
9439 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80064a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994b80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ac5916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd501473044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9442 "304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb",
9443 "30440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f",
9444 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc02000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb834730440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
9447 "3045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd7271",
9448 "3045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688",
9449 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc03000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd727183483045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
// commitment tx with four outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 3702;
chan.context.holder_dust_limit_satoshis = 546;
chan.context.channel_type = cached_channel_type.clone();
9458 test_commitment!("304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee40169",
9459 "3045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf75",
9460 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4846f916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf750148304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee4016901475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9463 "304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb89",
9464 "304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f",
9465 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb890147304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9468 "3044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb5817",
9469 "304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc",
9470 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0100000000000000000176050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb58170147304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
// commitment tx with three outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 3703;
9477 test_commitment!("3045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e",
9478 "304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e1",
9479 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484eb936a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e101483045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9482 "3045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a",
9483 "3045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05",
9484 "0200000000010120060e4a29579d429f0f27c17ee5f1ee282f20d706d6f90b63d35946d8f3029a0000000000000000000175050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a01483045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
// anchors: commitment tx with three outputs untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 3687;
chan.context.holder_dust_limit_satoshis = 3001;
chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9493 test_commitment_with_anchors!("3045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c228377",
9494 "3045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d",
9495 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80054a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aa28b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d01483045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c22837701475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9498 "3044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c",
9499 "3045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de",
9500 "02000000000101542562b326c08e3a076d9cfca2be175041366591da334d8d513ff1686fd95a6002000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c83483045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
// commitment tx with three outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 4914;
chan.context.holder_dust_limit_satoshis = 546;
chan.context.channel_type = cached_channel_type.clone();
9509 test_commitment!("3045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c95244",
9510 "3045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c19",
9511 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484ae8f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c1901483045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c9524401475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9514 "3045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374",
9515 "30440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10",
9516 "02000000000101a9172908eace869cc35128c31fc2ab502f72e4dff31aab23e0244c4b04b11ab00000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374014730440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
// commitment tx with two outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 4915;
chan.context.holder_dust_limit_satoshis = 546;
9524 test_commitment!("304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a720",
9525 "30450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf5",
9526 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484fa926a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf50147304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a72001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
// anchors: commitment tx with two outputs untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 4894;
chan.context.holder_dust_limit_satoshis = 4001;
chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9534 test_commitment_with_anchors!("3045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b95",
9535 "30450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd",
9536 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ad0886a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd01483045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b9501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
// commitment tx with two outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 9651180;
chan.context.holder_dust_limit_satoshis = 546;
chan.context.channel_type = cached_channel_type.clone();
9544 test_commitment!("304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd3",
9545 "3045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de",
9546 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b800222020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80ec0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4840400483045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de0147304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
// commitment tx with one output untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 9651181;
9552 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
9553 "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
9554 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
// anchors: commitment tx with one output untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 6216010;
chan.context.holder_dust_limit_satoshis = 4001;
chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9562 test_commitment_with_anchors!("30450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf",
9563 "30450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1",
9564 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a04004830450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1014830450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9566 // commitment tx with fee greater than funder amount
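		// (The funder cannot spend more on fees than its own balance: the fee is capped
		// at the funder's full to_local amount, so the resulting transaction and
		// signatures are identical to the previous minimum-feerate case.)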
		chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
		chan.context.feerate_per_kw = 9651936;
		chan.context.holder_dust_limit_satoshis = 546;
		chan.context.channel_type = cached_channel_type;

		test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
			"304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
			"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});

		// commitment tx with 3 htlc outputs, 2 offered having the same amount and preimage
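		// (BOLT 3 breaks ties between otherwise-identical offered HTLC outputs by CLTV
		// expiry, which is the ordering this vector exercises.)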
		chan.context.value_to_self_msat = 7_000_000_000 - 2_000_000;
		chan.context.feerate_per_kw = 253;
		chan.context.pending_inbound_htlcs.clear();
		chan.context.pending_inbound_htlcs.push({
			let mut out = InboundHTLCOutput{
				htlc_id: 1,
				amount_msat: 2000000,
				cltv_expiry: 501,
				payment_hash: PaymentHash([0; 32]),
				state: InboundHTLCState::Committed,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
			out
		});
		chan.context.pending_outbound_htlcs.clear();
		chan.context.pending_outbound_htlcs.push({
			let mut out = OutboundHTLCOutput{
				htlc_id: 6,
				amount_msat: 5000001,
				cltv_expiry: 506,
				payment_hash: PaymentHash([0; 32]),
				state: OutboundHTLCState::Committed,
				source: HTLCSource::dummy(),
				skimmed_fee_msat: None,
				blinding_point: None,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
			out
		});
		chan.context.pending_outbound_htlcs.push({
			let mut out = OutboundHTLCOutput{
				htlc_id: 5,
				amount_msat: 5000000,
				cltv_expiry: 505,
				payment_hash: PaymentHash([0; 32]),
				state: OutboundHTLCState::Committed,
				source: HTLCSource::dummy(),
				skimmed_fee_msat: None,
				blinding_point: None,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
			out
		});
		test_commitment!("304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c8",
			"304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c",
			"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2d8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121b8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121bc0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484a69f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c0147304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

			{ 0,
			"3045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce5",
			"3044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b",
			"020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec000000000000000000011f070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce501473044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },

			{ 1,
			"3045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a",
			"3045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d",
			"020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec01000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a01483045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868f9010000" },

			{ 2,
			"30440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc",
			"304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb",
			"020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec02000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc0147304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" }
		});
		chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
		test_commitment_with_anchors!("3044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c725",
			"3045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b76",
			"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aad9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b7601473044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c72501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

			{ 2,
			"30440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c2",
			"304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424",
			"020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c402000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c28347304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },

			{ 3,
			"304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c",
			"304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b",
			"020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40300000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c8347304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568f9010000" },

			{ 4,
			"3045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a1",
			"3045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b",
			"020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40400000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a183483045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568fa010000" }
		});
	}

	#[test]
	fn test_per_commitment_secret_gen() {
		// Test vectors from BOLT 3 Appendix D:

		let mut seed = [0; 32];
		seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap());
		assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
			<Vec<u8>>::from_hex("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap()[..]);

		seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap());
		assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
			<Vec<u8>>::from_hex("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()[..]);

		assert_eq!(chan_utils::build_commitment_secret(&seed, 0xaaaaaaaaaaa),
			<Vec<u8>>::from_hex("56f4008fb007ca9acf0e15b054d5c9fd12ee06cea347914ddbaed70d1c13a528").unwrap()[..]);

		assert_eq!(chan_utils::build_commitment_secret(&seed, 0x555555555555),
			<Vec<u8>>::from_hex("9015daaeb06dba4ccc05b91b2f73bd54405f2be9f217fbacd3c5ac2e62327d31").unwrap()[..]);

		seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap());
		assert_eq!(chan_utils::build_commitment_secret(&seed, 1),
			<Vec<u8>>::from_hex("915c75942a26bb3a433a8ce2cb0427c29ec6c1775cfc78328b57f6ba7bfeaa9c").unwrap()[..]);
	}

	#[test]
	fn test_key_derivation() {
		// Test vectors from BOLT 3 Appendix E:
		let secp_ctx = Secp256k1::new();

		let base_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f").unwrap()[..]).unwrap();
		let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();

		let base_point = PublicKey::from_secret_key(&secp_ctx, &base_secret);
		assert_eq!(base_point.serialize()[..], <Vec<u8>>::from_hex("036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2").unwrap()[..]);

		let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
		assert_eq!(per_commitment_point.serialize()[..], <Vec<u8>>::from_hex("025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486").unwrap()[..]);

		assert_eq!(chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret),
			SecretKey::from_slice(&<Vec<u8>>::from_hex("cbced912d3b21bf196a766651e436aff192362621ce317704ea2f75d87e7be0f").unwrap()[..]).unwrap());

		assert_eq!(RevocationKey::from_basepoint(&secp_ctx, &RevocationBasepoint::from(base_point), &per_commitment_point).to_public_key().serialize()[..],
			<Vec<u8>>::from_hex("02916e326636d19c33f13e8c0c3a03dd157f332f3e99c317c141dd865eb01f8ff0").unwrap()[..]);

		assert_eq!(chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_secret, &base_secret),
			SecretKey::from_slice(&<Vec<u8>>::from_hex("d09ffff62ddb2297ab000cc85bcb4283fdeb6aa052affbc9dddcf33b61078110").unwrap()[..]).unwrap());
	}
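
	// Illustrative only, not part of LDK's API: a minimal sketch of the BOLT 3
	// Appendix E tweak exercised above. A derived (non-revocation) private key is
	// base_secret + SHA256(per_commitment_point || basepoint) mod n, which is what
	// `chan_utils::derive_private_key` computes; the assertion cross-checks the two.
	// This assumes the `Scalar`/`add_tweak` API re-exported through
	// `bitcoin::secp256k1`.
	#[test]
	fn key_derivation_tweak_sketch() {
		use bitcoin::hashes::HashEngine;
		use bitcoin::secp256k1::Scalar;

		let secp_ctx = Secp256k1::new();
		let base_secret = SecretKey::from_slice(&[1; 32]).unwrap();
		let base_point = PublicKey::from_secret_key(&secp_ctx, &base_secret);
		let per_commitment_point =
			PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

		// tweak = SHA256(per_commitment_point || basepoint)
		let mut sha = Sha256::engine();
		sha.input(&per_commitment_point.serialize());
		sha.input(&base_point.serialize());
		let tweak = Scalar::from_be_bytes(Sha256::from_engine(sha).to_byte_array()).unwrap();

		assert_eq!(base_secret.add_tweak(&tweak).unwrap(),
			chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret));
	}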

	#[test]
	fn test_zero_conf_channel_type_support() {
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
		let logger = test_utils::TestLogger::new();

		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
			node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

		let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
		channel_type_features.set_zero_conf_required();

		let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
		open_channel_msg.channel_type = Some(channel_type_features);
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let res = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
			node_b_node_id, &channelmanager::provided_channel_type_features(&config),
			&channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false);
		assert!(res.is_ok());
	}

	#[test]
	fn test_supports_anchors_zero_htlc_tx_fee() {
		// Tests that if both sides support and negotiate `anchors_zero_fee_htlc_tx`, it is the
		// resulting `channel_type`.
		let secp_ctx = Secp256k1::new();
		let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
		let logger = test_utils::TestLogger::new();

		let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
		let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

		let mut config = UserConfig::default();
		config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;

		// It is not enough for just the initiator to signal `option_anchors_zero_fee_htlc_tx`;
		// both sides need to signal it.
		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42,
			&config, 0, 42, None
		).unwrap();
		assert!(!channel_a.context.channel_type.supports_anchors_zero_fee_htlc_tx());

		let mut expected_channel_type = ChannelTypeFeatures::empty();
		expected_channel_type.set_static_remote_key_required();
		expected_channel_type.set_anchors_zero_fee_htlc_tx_required();

		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
			None
		).unwrap();

		let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
		let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		).unwrap();

		assert_eq!(channel_a.context.channel_type, expected_channel_type);
		assert_eq!(channel_b.context.channel_type, expected_channel_type);
	}

	#[test]
	fn test_rejects_implicit_simple_anchors() {
		// Tests that if `option_anchors` is being negotiated implicitly through the intersection of
		// each side's `InitFeatures`, it is rejected.
		let secp_ctx = Secp256k1::new();
		let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
		let logger = test_utils::TestLogger::new();

		let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
		let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

		let config = UserConfig::default();

		// See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
		let static_remote_key_required: u64 = 1 << 12;
		let simple_anchors_required: u64 = 1 << 20;
		let raw_init_features = static_remote_key_required | simple_anchors_required;
		let init_features_with_simple_anchors = InitFeatures::from_le_bytes(raw_init_features.to_le_bytes().to_vec());
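		// With bits 12 and 20 set, `raw_init_features` is 0x101000, i.e. the
		// little-endian bytes [0x00, 0x10, 0x10, 0x00, ...] that `from_le_bytes`
		// decodes back into required `static_remote_key` and required `option_anchors`.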

		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
			None
		).unwrap();

		// Set `channel_type` to `None` to force the implicit feature negotiation.
		let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
		open_channel_msg.channel_type = None;

		// Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
		// `static_remote_key`, it will fail the channel.
		let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &init_features_with_simple_anchors,
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		);
		assert!(channel_b.is_err());
	}

	#[test]
	fn test_rejects_simple_anchors_channel_type() {
		// Tests that if `option_anchors` is being negotiated through the `channel_type` feature,
		// it is rejected.
		let secp_ctx = Secp256k1::new();
		let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
		let logger = test_utils::TestLogger::new();

		let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
		let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

		let config = UserConfig::default();

		// See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
		let static_remote_key_required: u64 = 1 << 12;
		let simple_anchors_required: u64 = 1 << 20;
		let simple_anchors_raw_features = static_remote_key_required | simple_anchors_required;
		let simple_anchors_init = InitFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
		let simple_anchors_channel_type = ChannelTypeFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
		assert!(!simple_anchors_init.requires_unknown_bits());
		assert!(!simple_anchors_channel_type.requires_unknown_bits());

		// First, we'll try to open a channel between A and B where A requests a channel type for
		// the original `option_anchors` feature (non zero fee htlc tx). This should be rejected by
		// B as it's not supported by LDK.
		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
			None
		).unwrap();

		let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
		open_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());

		let res = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &simple_anchors_init,
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		);
		assert!(res.is_err());

		// Then, we'll try to open another channel where A requests a channel type for
		// `anchors_zero_fee_htlc_tx`. B is malicious and tries to downgrade the channel type to the
		// original `option_anchors` feature, which should be rejected by A as it's not supported by
		// LDK.
		let mut channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init,
			10000000, 100000, 42, &config, 0, 42, None
		).unwrap();

		let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));

		let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		).unwrap();

		let mut accept_channel_msg = channel_b.get_accept_channel_message();
		accept_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());

		let res = channel_a.accept_channel(
			&accept_channel_msg, &config.channel_handshake_limits, &simple_anchors_init
		);
		assert!(res.is_err());
	}

	#[test]
	fn test_waiting_for_batch() {
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let logger = test_utils::TestLogger::new();
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let best_block = BestBlock::from_network(network);
		let chain_hash = ChainHash::using_genesis_block(network);
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

		let mut config = UserConfig::default();
		// Set trust_own_funding_0conf while ensuring we don't send channel_ready for a
		// channel in a batch before all channels are ready.
		config.channel_handshake_limits.trust_own_funding_0conf = true;

		// Create a channel from node a to node b that will be part of batch funding.
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(
			&feeest, &&keys_provider, &&keys_provider, node_b_node_id,
			&channelmanager::provided_init_features(&config),
			10000000, 100000, 42, &config, 0, 42, None
		).unwrap();

		let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(
			&feeest, &&keys_provider, &&keys_provider, node_b_node_id,
			&channelmanager::provided_channel_type_features(&config),
			&channelmanager::provided_init_features(&config),
			&open_channel_msg, 7, &config, 0, &&logger,
			true,  // Allow node b to send a 0conf channel_ready.
		).unwrap();

		let accept_channel_msg = node_b_chan.accept_inbound_channel();
		node_a_chan.accept_channel(
			&accept_channel_msg,
			&config.channel_handshake_limits,
			&channelmanager::provided_init_features(&config),
		).unwrap();

		// Fund the channel with a batch funding transaction.
		let output_script = node_a_chan.context.get_funding_redeemscript();
		let tx = Transaction {
			version: 2,
			lock_time: LockTime::ZERO,
			input: Vec::new(),
			output: vec![
				TxOut {
					value: 10000000, script_pubkey: output_script.clone(),
				},
				TxOut {
					value: 10000000, script_pubkey: Builder::new().into_script(),
				},
			],
		};
		let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
		let funding_created_msg = node_a_chan.get_funding_created(
			tx.clone(), funding_outpoint, true, &&logger,
		).map_err(|_| ()).unwrap();
		let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(
			&funding_created_msg.unwrap(),
			best_block,
			&&keys_provider,
			&&logger,
		).map_err(|_| ()).unwrap();
		let node_b_updates = node_b_chan.monitor_updating_restored(
			&&logger, &&keys_provider, chain_hash, &config, 0,
		);

		// Receive funding_signed, but the channel will be configured to hold sending channel_ready and
		// broadcasting the funding transaction until the batch is ready.
		let res = node_a_chan.funding_signed(
			&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger,
		);
		let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
		let node_a_updates = node_a_chan.monitor_updating_restored(
			&&logger, &&keys_provider, chain_hash, &config, 0,
		);
		// Our channel_ready shouldn't be sent yet, even with trust_own_funding_0conf set,
		// as the funding transaction depends on all channels in the batch becoming ready.
		assert!(node_a_updates.channel_ready.is_none());
		assert!(node_a_updates.funding_broadcastable.is_none());
		assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));

		// It is possible to receive a 0conf channel_ready from the remote node.
		node_a_chan.channel_ready(
			&node_b_updates.channel_ready.unwrap(),
			&&keys_provider, chain_hash, &config, &best_block, &&logger,
		).unwrap();
		assert_eq!(
			node_a_chan.context.channel_state,
			ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH | AwaitingChannelReadyFlags::THEIR_CHANNEL_READY)
		);

		// The WAITING_FOR_BATCH flag is only cleared when ChannelManager calls set_batch_ready.
		node_a_chan.set_batch_ready();
		assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY));
		assert!(node_a_chan.check_get_channel_ready(0).is_some());
	}